Compare commits: master...support_v0 (12 commits)

Commits (SHA1):
7ead92bda7
60e5d6d500
65793f7c6d
2fe74b0df1
161d33c2b7
1a7c3db67f
c1d90f018b
064e18b277
21caa904f4
f74d058c2e
c8ce6e9fe4
748da78dc7

30 changed files with 396 additions and 120 deletions
@@ -34,6 +34,7 @@ func reloadConfig() error {
 	if err != nil {
 		return err
 	}
+	cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
 	err = logPrm.SetLevelString(cfg.GetString("logger.level"))
 	if err != nil {
 		return err

@@ -43,6 +43,8 @@ func defaultConfiguration(cfg *viper.Viper) {
 	setControlDefaults(cfg)
 
 	cfg.SetDefault("governance.disable", false)
+
+	cfg.SetDefault("node.kludge_compatibility_mode", false)
 }
 
 func setControlDefaults(cfg *viper.Viper) {

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"os"
 	"sync"
+	"sync/atomic"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/misc"

@@ -37,6 +38,7 @@ var (
 	cfg *viper.Viper
 	configFile *string
 	configDir *string
+	cmode = &atomic.Bool{}
 )
 
 func exitErr(err error) {

@@ -62,6 +64,8 @@ func main() {
 	cfg, err = newConfig()
 	exitErr(err)
 
+	cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
+
 	metrics := irMetrics.NewInnerRingMetrics()
 
 	err = logPrm.SetLevelString(

@@ -84,7 +88,7 @@ func main() {
 	metricsCmp = newMetricsComponent()
 	metricsCmp.init()
 
-	innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics)
+	innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode)
 	exitErr(err)
 
 	pprofCmp.start()

@@ -109,6 +109,9 @@ type applicationConfiguration struct {
 		lowMem bool
 		rebuildWorkers uint32
 	}
+
+	// if need to run node in compatibility with other versions mode
+	cmode *atomic.Bool
 }
 
 type shardCfg struct {

@@ -204,10 +207,13 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 		}
 
 		// clear if it is rereading
+		cmode := a.cmode
 		*a = applicationConfiguration{}
+		a.cmode = cmode
 	}
 
 	a._read = true
+	a.cmode.Store(nodeconfig.CompatibilityMode(c))
 
 	// Logger
 

@@ -648,7 +654,11 @@ type cfgControlService struct {
 var persistateSideChainLastBlockKey = []byte("side_chain_last_processed_block")
 
 func initCfg(appCfg *config.Config) *cfg {
-	c := &cfg{}
+	c := &cfg{
+		applicationConfiguration: applicationConfiguration{
+			cmode: &atomic.Bool{},
+		},
+	}
 
 	err := c.readConfig(appCfg)
 	if err != nil {

@@ -292,3 +292,8 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
 func (l PersistentPolicyRulesConfig) NoSync() bool {
 	return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
 }
+
+// CompatibilityMode returns true if need to run node in compatibility with previous versions mode.
+func CompatibilityMode(c *config.Config) bool {
+	return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode")
+}

@@ -48,6 +48,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
 		}),
 		client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
 		client.WithMorphCacheMetrics(c.metricsCollector.MorphCacheMetrics()),
+		client.WithCompatibilityMode(c.cmode),
 	)
 	if err != nil {
 		c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
go.mod (10 changes)

@@ -14,7 +14,6 @@ require (
 	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
 	github.com/cheggaaa/pb v1.0.29
 	github.com/chzyer/readline v1.5.1
-	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 	github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
 	github.com/google/uuid v1.6.0
 	github.com/hashicorp/golang-lru/v2 v2.0.7

@@ -40,8 +39,8 @@ require (
 	go.uber.org/zap v1.26.0
 	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
 	golang.org/x/sync v0.6.0
-	golang.org/x/sys v0.16.0
-	golang.org/x/term v0.16.0
+	golang.org/x/sys v0.18.0
+	golang.org/x/term v0.18.0
 	google.golang.org/grpc v1.61.0
 	google.golang.org/protobuf v1.33.0
 	gopkg.in/yaml.v3 v3.0.1

@@ -65,6 +64,7 @@ require (
 	github.com/consensys/bavard v0.1.13 // indirect
 	github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/davidmz/go-pageant v1.0.2 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect

@@ -121,8 +121,8 @@ require (
 	go.opentelemetry.io/otel/sdk v1.22.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.18.0 // indirect
-	golang.org/x/net v0.20.0 // indirect
+	golang.org/x/crypto v0.21.0 // indirect
+	golang.org/x/net v0.23.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
go.sum (16 changes)

@@ -316,8 +316,8 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
-golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=

@@ -339,8 +339,8 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@@ -375,15 +375,15 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
-golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
-golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -462,6 +462,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
 		name: morphPrefix,
 		from: fromSideChainBlock,
 		morphCacheMetric: s.irMetrics.MorphCacheMetrics(),
+		cmode: s.cmode,
 	}
 
 	// create morph client

@@ -103,6 +103,8 @@ type (
 		// should report start errors
 		// to the application.
 		runners []func(chan<- error) error
+
+		cmode *atomic.Bool
 	}
 
 	chainParams struct {

@@ -113,6 +115,7 @@ type (
 		sgn *transaction.Signer
 		from uint32 // block height
 		morphCacheMetric metrics.MorphCacheMetrics
+		cmode *atomic.Bool
 	}
 )
 

@@ -330,12 +333,13 @@ func (s *Server) registerStarter(f func() error) {
 
 // New creates instance of inner ring sever structure.
 func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan<- error,
-	metrics *metrics.InnerRingServiceMetrics,
+	metrics *metrics.InnerRingServiceMetrics, cmode *atomic.Bool,
 ) (*Server, error) {
 	var err error
 	server := &Server{
 		log: log,
 		irMetrics: metrics,
+		cmode: cmode,
 	}
 
 	server.sdNotify, err = server.initSdNotify(cfg)

@@ -485,6 +489,7 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
 		}),
 		client.WithSwitchInterval(p.cfg.GetDuration(p.name+".switch_interval")),
 		client.WithMorphCacheMetrics(p.morphCacheMetric),
+		client.WithCompatibilityMode(p.cmode),
 	)
 }
 

@@ -210,7 +210,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree
 }
 
 // TreeSortedByFilename implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node, last string, count int) ([]pilorama.NodeInfo, string, error) {
+func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node, last *string, count int) ([]pilorama.NodeInfo, *string, error) {
 	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename",
 		trace.WithAttributes(
 			attribute.String("container_id", cid.EncodeToString()),

@@ -222,7 +222,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID,
 
 	var err error
 	var nodes []pilorama.NodeInfo
-	var cursor string
+	var cursor *string
 	for _, sh := range e.sortShards(cid) {
 		nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
 		if err != nil {
@@ -232,14 +232,19 @@ func (db *DB) ContainerCount(ctx context.Context, id cid.ID) (ObjectCounters, er
 }
 
 func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error {
-	if err := db.updateShardObjectCounter(tx, phy, 1, true); err != nil {
+	b := tx.Bucket(shardInfoBucket)
+	if b == nil {
+		return nil
+	}
+
+	if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil {
 		return fmt.Errorf("could not increase phy object counter: %w", err)
 	}
-	if err := db.updateShardObjectCounter(tx, logical, 1, true); err != nil {
+	if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil {
 		return fmt.Errorf("could not increase logical object counter: %w", err)
 	}
 	if isUserObject {
-		if err := db.updateShardObjectCounter(tx, user, 1, true); err != nil {
+		if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil {
 			return fmt.Errorf("could not increase user object counter: %w", err)
 		}
 	}

@@ -252,6 +257,10 @@ func (db *DB) updateShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint6
 		return nil
 	}
 
+	return db.updateShardObjectCounterBucket(b, typ, delta, inc)
+}
+
+func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta uint64, inc bool) error {
 	var counter uint64
 	var counterKey []byte
 
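The refactor above resolves the shard-info bucket once per transaction and hands the *bbolt.Bucket to the counter helper, instead of looking it up from the *bbolt.Tx on every call. A self-contained sketch of that pattern with go.etcd.io/bbolt (bucket and key names here are illustrative, not the ones used by the metabase):

package main

import (
	"encoding/binary"
	"fmt"
	"log"
	"os"
	"path/filepath"

	"go.etcd.io/bbolt"
)

// bumpCounter works on an already-resolved bucket, mirroring
// updateShardObjectCounterBucket: callers fetch the bucket once per tx.
func bumpCounter(b *bbolt.Bucket, key []byte, delta uint64) error {
	var cur uint64
	if raw := b.Get(key); len(raw) == 8 {
		cur = binary.LittleEndian.Uint64(raw)
	}
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, cur+delta)
	return b.Put(key, buf)
}

func main() {
	path := filepath.Join(os.TempDir(), "counters.db")
	db, err := bbolt.Open(path, 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("shard-info"))
		if err != nil {
			return err
		}
		// One bucket lookup, several counter updates.
		for _, k := range [][]byte{[]byte("phy"), []byte("logic"), []byte("user")} {
			if err := bumpCounter(b, k, 1); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("counters updated")
}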
@@ -36,6 +36,14 @@ func (r StorageIDRes) StorageID() []byte {
 // StorageID returns storage descriptor for objects from the blobstor.
 // It is put together with the object can makes get/delete operation faster.
 func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) {
+	var (
+		startedAt = time.Now()
+		success = false
+	)
+	defer func() {
+		db.metrics.AddMethodDuration("StorageID", time.Since(startedAt), success)
+	}()
+
 	_, span := tracing.StartSpanFromContext(ctx, "metabase.StorageID",
 		trace.WithAttributes(
 			attribute.String("address", prm.addr.EncodeToString()),

@@ -54,7 +62,7 @@ func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes
 
 		return err
 	})
-
+	success = err == nil
 	return res, metaerr.Wrap(err)
 }
 

@@ -1003,7 +1003,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeID Node, threshold int)
 }
 
 // TreeSortedByFilename implements the Forest interface.
-func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node, last string, count int) ([]NodeInfo, string, error) {
+func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node, last *string, count int) ([]NodeInfo, *string, error) {
 	var (
 		startedAt = time.Now()
 		success = false

@@ -1025,7 +1025,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr
 	defer t.modeMtx.RUnlock()
 
 	if t.mode.NoMetabase() {
-		return nil, "", ErrDegradedMode
+		return nil, last, ErrDegradedMode
 	}
 
 	h := newHeap(last, count)

@@ -1069,20 +1069,25 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr
 	}
 
 	if fewChildren {
-		result = sortAndCut(result, []byte(last))
+		result = sortAndCut(result, last)
 	}
 	if len(result) != 0 {
-		last = string(result[len(result)-1].Meta.GetAttr(AttributeFilename))
+		s := string(result[len(result)-1].Meta.GetAttr(AttributeFilename))
+		last = &s
 	}
 	return result, last, metaerr.Wrap(err)
 }
 
-func sortAndCut(result []NodeInfo, last []byte) []NodeInfo {
+func sortAndCut(result []NodeInfo, last *string) []NodeInfo {
+	var lastBytes []byte
+	if last != nil {
+		lastBytes = []byte(*last)
+	}
 	sort.Slice(result, func(i, j int) bool {
 		return bytes.Compare(result[i].Meta.GetAttr(AttributeFilename), result[j].Meta.GetAttr(AttributeFilename)) == -1
 	})
 	for i := range result {
-		if bytes.Compare(last, result[i].Meta.GetAttr(AttributeFilename)) == -1 {
+		if lastBytes == nil || bytes.Compare(lastBytes, result[i].Meta.GetAttr(AttributeFilename)) == -1 {
 			return result[i:]
 		}
 	}

@@ -156,11 +156,11 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string,
 }
 
 // TreeSortedByFilename implements the Forest interface.
-func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeID Node, start string, count int) ([]NodeInfo, string, error) {
+func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeID Node, start *string, count int) ([]NodeInfo, *string, error) {
 	fullID := cid.String() + "/" + treeID
 	s, ok := f.treeMap[fullID]
 	if !ok {
-		return nil, "", ErrTreeNotFound
+		return nil, start, ErrTreeNotFound
 	}
 	if count == 0 {
 		return nil, start, nil

@@ -169,7 +169,14 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
 	children := s.tree.getChildren(nodeID)
 	res := make([]NodeInfo, 0, len(children))
 	for _, childID := range children {
-		if len(s.infoMap[childID].Meta.GetAttr(AttributeFilename)) == 0 {
+		var found bool
+		for _, kv := range s.infoMap[childID].Meta.Items {
+			if kv.Key == AttributeFilename {
+				found = true
+				break
+			}
+		}
+		if !found {
 			continue
 		}
 		res = append(res, NodeInfo{

@@ -179,22 +186,24 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
 		})
 	}
 	if len(res) == 0 {
-		return res, "", nil
+		return res, start, nil
 	}
 
 	sort.Slice(res, func(i, j int) bool {
 		return bytes.Compare(res[i].Meta.GetAttr(AttributeFilename), res[j].Meta.GetAttr(AttributeFilename)) == -1
 	})
 	for i := range res {
-		if string(res[i].Meta.GetAttr(AttributeFilename)) > start {
+		if start == nil || string(res[i].Meta.GetAttr(AttributeFilename)) > *start {
 			finish := i + count
 			if len(res) < finish {
 				finish = len(res)
 			}
-			return res[i:finish], string(res[finish-1].Meta.GetAttr(AttributeFilename)), nil
+			last := string(res[finish-1].Meta.GetAttr(AttributeFilename))
+			return res[i:finish], &last, nil
 		}
 	}
-	return nil, string(res[len(res)-1].Meta.GetAttr(AttributeFilename)), nil
+	last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename))
+	return nil, &last, nil
 }
 
 // TreeGetChildren implements the Forest interface.

@@ -16,7 +16,6 @@ import (
 	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	"github.com/davecgh/go-spew/spew"
 	"github.com/google/uuid"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/errgroup"

@@ -216,7 +215,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
 
 		b.Run(providers[i].name+",root", func(b *testing.B) {
 			for i := 0; i < b.N; i++ {
-				res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, RootID, "", 100)
+				res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, RootID, nil, 100)
 				if err != nil || len(res) != 100 {
 					b.Fatalf("err %v, count %d", err, len(res))
 				}

@@ -224,7 +223,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
 		})
 		b.Run(providers[i].name+",leaf", func(b *testing.B) {
 			for i := 0; i < b.N; i++ {
-				res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, 1, "", 100)
+				res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, 1, nil, 100)
 				if err != nil || len(res) != 0 {
 					b.FailNow()
 				}

@@ -247,14 +246,14 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"
-	treeAdd := func(t *testing.T, ts int) {
+	treeAdd := func(t *testing.T, ts int, filename string) {
 		_, err := s.TreeMove(context.Background(), d, treeID, &Move{
 			Child: RootID + uint64(ts),
 			Parent: RootID,
 			Meta: Meta{
 				Time: Timestamp(ts),
 				Items: []KeyValue{
-					{Key: AttributeFilename, Value: []byte(strconv.Itoa(ts))},
+					{Key: AttributeFilename, Value: []byte(filename)},
 				},
 			},
 		})

@@ -262,20 +261,20 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
 	}
 
 	const count = 9
-	for i := 0; i < count; i++ {
-		treeAdd(t, i+1)
+	treeAdd(t, 1, "")
+	for i := 1; i < count; i++ {
+		treeAdd(t, i+1, strconv.Itoa(i+1))
 	}
 
 	var result []NodeInfo
-	treeAppend := func(t *testing.T, last string, count int) string {
+	treeAppend := func(t *testing.T, last *string, count int) *string {
 		res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, RootID, last, count)
 		require.NoError(t, err)
 		result = append(result, res...)
-		spew.Dump(last, res)
 		return cursor
 	}
 
-	last := treeAppend(t, "", 2)
+	last := treeAppend(t, nil, 2)
 	last = treeAppend(t, last, 3)
 	last = treeAppend(t, last, 0)
 	last = treeAppend(t, last, 1)

@@ -284,8 +283,12 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
 	require.Len(t, result, count)
 	for i := range result {
 		require.Equal(t, RootID+uint64(i+1), result[i].ID)
+		if i == 0 {
+			require.Equal(t, "", string(result[i].Meta.GetAttr(AttributeFilename)))
+		} else {
 			require.Equal(t, strconv.Itoa(RootID+i+1), string(result[i].Meta.GetAttr(AttributeFilename)))
+		}
 	}
 }
 
 func TestForest_TreeSortedFilename(t *testing.T) {

@@ -343,7 +346,7 @@ func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) {
 	}
 
 	getChildren := func(t *testing.T, id Node) []NodeInfo {
-		res, _, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, id, "", len(items))
+		res, _, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, id, nil, len(items))
 		require.NoError(t, err)
 		return res
 	}

@@ -17,6 +17,7 @@ func (h filenameHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
 func (h *filenameHeap) Push(x any) {
 	*h = append(*h, x.(heapInfo))
 }
+
 func (h *filenameHeap) Pop() any {
 	old := *h
 	n := len(old)

@@ -27,13 +28,13 @@ func (h *filenameHeap) Pop() any {
 
 // fixedHeap maintains a fixed number of smallest elements started at some point.
 type fixedHeap struct {
-	start string
+	start *string
 	max string
 	count int
 	h *filenameHeap
 }
 
-func newHeap(start string, count int) *fixedHeap {
+func newHeap(start *string, count int) *fixedHeap {
 	h := new(filenameHeap)
 	heap.Init(h)
 

@@ -46,7 +47,7 @@ func newHeap(start *string, count int) *fixedHeap {
 }
 
 func (h *fixedHeap) push(id Node, filename string) bool {
-	if filename == "" || filename <= h.start {
+	if h.start != nil && filename <= *h.start {
 		return false
 	}
 	heap.Push(h.h, heapInfo{id: id, filename: filename})

@@ -35,7 +35,7 @@ type Forest interface {
 	TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error)
 	// TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute..
 	// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
-	TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node, last string, count int) ([]NodeInfo, string, error)
+	TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node, last *string, count int) ([]NodeInfo, *string, error)
 	// TreeGetOpLog returns first log operation stored at or above the height.
 	// In case no such operation is found, empty Move and nil error should be returned.
 	TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error)
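The interface change above replaces the empty-string sentinel with a *string cursor: nil means "start from the beginning", and the returned cursor is the last filename of the page, so a node whose filename is literally "" can now be paged over as well (see the test that adds treeAdd(t, 1, "") and the push() change in the heap). A small self-contained sketch of the convention — the pager below is illustrative, not the pilorama implementation:

package main

import (
	"fmt"
	"sort"
)

// page mimics the *string cursor contract: nil cursor starts from the
// beginning; the cursor returned is the last filename of the page and is fed
// back to fetch the next one. An empty filename "" is a normal value here.
func page(names []string, last *string, count int) ([]string, *string) {
	sort.Strings(names)
	var out []string
	for _, n := range names {
		if last != nil && n <= *last {
			continue
		}
		out = append(out, n)
		if len(out) == count {
			break
		}
	}
	if len(out) == 0 {
		return nil, last
	}
	cursor := out[len(out)-1]
	return out, &cursor
}

func main() {
	names := []string{"", "a", "b", "c"}
	var cur *string
	for {
		batch, next := page(names, cur, 2)
		if len(batch) == 0 {
			break
		}
		fmt.Printf("%q\n", batch) // ["" "a"], then ["b" "c"]
		cur = next
	}
}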
@@ -184,7 +184,7 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin
 }
 
 // TreeSortedByFilename implements the pilorama.Forest interface.
-func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node, last string, count int) ([]pilorama.NodeInfo, string, error) {
+func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node, last *string, count int) ([]pilorama.NodeInfo, *string, error) {
 	ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename",
 		trace.WithAttributes(
 			attribute.String("shard_id", s.ID().String()),

@@ -196,14 +196,14 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID
 	defer span.End()
 
 	if s.pilorama == nil {
-		return nil, "", ErrPiloramaDisabled
+		return nil, last, ErrPiloramaDisabled
 	}
 
 	s.m.RLock()
 	defer s.m.RUnlock()
 
 	if s.info.Mode.NoMetabase() {
-		return nil, "", ErrDegradedMode
+		return nil, last, ErrDegradedMode
 	}
 	return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
 }

@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"sync/atomic"
 	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"

@@ -48,6 +49,8 @@ type cfg struct {
 	switchInterval time.Duration
 
 	morphCacheMetrics metrics.MorphCacheMetrics
+
+	cmode *atomic.Bool
 }
 
 const (

@@ -311,3 +314,11 @@ func WithMorphCacheMetrics(morphCacheMetrics metrics.MorphCacheMetrics) Option {
 		c.morphCacheMetrics = morphCacheMetrics
 	}
 }
+
+// WithCompatibilityMode indicates that Client is working in compatibility mode
+// in this mode we need to keep backward compatibility with services with previous version.
+func WithCompatibilityMode(cmode *atomic.Bool) Option {
+	return func(c *cfg) {
+		c.cmode = cmode
+	}
+}
@@ -566,14 +566,19 @@ func (c *Client) notaryCosigners(invokedByAlpha bool, ir []*keys.PublicKey, comm
 	}
 	s := make([]actor.SignerAccount, 2, 3)
 	// Proxy contract that will pay for the execution.
-	s[0] = actor.SignerAccount{
-		Signer: transaction.Signer{
-			Account: c.notary.proxy,
 	// Do not change this:
 	// We must be able to call NNS contract indirectly from the Container contract.
 	// Thus, CalledByEntry is not sufficient.
 	// In future we may restrict this to all the usecases we have.
-			Scopes: transaction.Global,
+	scopes := transaction.Global
+	if c.cfg.cmode != nil && c.cfg.cmode.Load() {
+		// Set it to None to keep ability to send notary requests during upgrade
+		scopes = transaction.None
+	}
+	s[0] = actor.SignerAccount{
+		Signer: transaction.Signer{
+			Account: c.notary.proxy,
+			Scopes: scopes,
 		},
 		Account: notary.FakeContractAccount(c.notary.proxy),
 	}
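Taken together with the node.kludge_compatibility_mode setting and the WithCompatibilityMode option introduced earlier in this comparison, the pattern is: one shared *atomic.Bool is filled from configuration (including on config reload, per the reloadConfig hunk above) and consulted by the client when it builds notary signers, so the scope can be relaxed during an upgrade. A minimal standard-library sketch of that pattern — the component type and scope strings below are illustrative, not the real client API:

package main

import (
	"fmt"
	"sync/atomic"
)

// component stands in for the morph client: it keeps a pointer to the shared
// flag and consults it on every operation, as the notaryCosigners hunk does.
type component struct{ cmode *atomic.Bool }

func (c *component) scope() string {
	if c.cmode != nil && c.cmode.Load() {
		return "None" // compatibility mode: relax the notary signer scope
	}
	return "Global"
}

func main() {
	cmode := &atomic.Bool{}
	cmode.Store(false) // e.g. cfg.GetBool("node.kludge_compatibility_mode")

	c := &component{cmode: cmode}
	fmt.Println(c.scope()) // Global

	cmode.Store(true) // a config reload flips the shared flag in place
	fmt.Println(c.scope()) // None
}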
@@ -54,6 +54,9 @@ type Prm struct {
 
 	// If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
 	SoftAPECheck bool
+
+	// If true, object headers will not retrieved from storage engine.
+	WithoutHeaderRequest bool
 }
 
 var errMissingOID = errors.New("object ID is not set")

@@ -17,36 +17,38 @@ import (
 
 type request struct {
 	operation string
-	resource *resource
+	resource resource
 	properties map[string]string
 }
 
-var _ aperesource.Request = (*request)(nil)
+var defaultRequest = request{}
+
+var _ aperesource.Request = request{}
 
 type resource struct {
 	name string
 	properties map[string]string
 }
 
-var _ aperesource.Resource = (*resource)(nil)
+var _ aperesource.Resource = resource{}
 
-func (r *resource) Name() string {
+func (r resource) Name() string {
 	return r.name
 }
 
-func (r *resource) Property(key string) string {
+func (r resource) Property(key string) string {
 	return r.properties[key]
 }
 
-func (r *request) Operation() string {
+func (r request) Operation() string {
 	return r.operation
 }
 
-func (r *request) Property(key string) string {
+func (r request) Property(key string) string {
 	return r.properties[key]
 }
 
-func (r *request) Resource() aperesource.Resource {
+func (r request) Resource() aperesource.Resource {
 	return r.resource
 }
 

@@ -123,7 +125,7 @@ func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, header *objectV
 // newAPERequest creates an APE request to be passed to a chain router. It collects resource properties from
 // header provided by headerProvider. If it cannot be found in headerProvider, then properties are
 // initialized from header given in prm (if it is set). Otherwise, just CID and OID are set to properties.
-func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (*request, error) {
+func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (request, error) {
 	switch prm.Method {
 	case nativeschema.MethodGetObject,
 		nativeschema.MethodHeadObject,

@@ -131,32 +133,31 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (*request, err
 		nativeschema.MethodHashObject,
 		nativeschema.MethodDeleteObject:
 		if prm.Object == nil {
-			return nil, fmt.Errorf("method %s: %w", prm.Method, errMissingOID)
+			return defaultRequest, fmt.Errorf("method %s: %w", prm.Method, errMissingOID)
 		}
 	case nativeschema.MethodSearchObject, nativeschema.MethodPutObject:
 	default:
-		return nil, fmt.Errorf("unknown method: %s", prm.Method)
+		return defaultRequest, fmt.Errorf("unknown method: %s", prm.Method)
 	}
 
 	var header *objectV2.Header
 	if prm.Header != nil {
 		header = prm.Header
-	} else if prm.Object != nil {
+	} else if prm.Object != nil && !prm.WithoutHeaderRequest {
 		headerObjSDK, err := c.headerProvider.GetHeader(ctx, prm.Container, *prm.Object)
 		if err == nil {
 			header = headerObjSDK.ToV2().GetHeader()
 		}
 	}
 
-	return &request{
-		operation: prm.Method,
-		resource: &resource{
-			name: resourceName(prm.Container, prm.Object, prm.Namespace),
-			properties: objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header),
-		},
-		properties: map[string]string{
+	var result request
+	result.operation = prm.Method
+	result.resource.name = resourceName(prm.Container, prm.Object, prm.Namespace)
+	result.resource.properties = objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header)
+	result.properties = map[string]string{
 		nativeschema.PropertyKeyActorPublicKey: prm.SenderKey,
 		nativeschema.PropertyKeyActorRole: prm.Role,
-		},
-	}, nil
+	}
+
+	return result, nil
 }
@@ -258,7 +258,7 @@ func TestNewAPERequest(t *testing.T) {
 
 			expectedRequest := request{
 				operation: method,
-				resource: &resource{
+				resource: resource{
 					name: resourceName(cnr, obj, prm.Namespace),
 					properties: objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header {
 						if headerObjSDK != nil {

@@ -273,7 +273,7 @@ func TestNewAPERequest(t *testing.T) {
 				},
 			}
 
-			require.Equal(t, expectedRequest, *r)
+			require.Equal(t, expectedRequest, r)
 		})
 	}
 	})

@@ -133,6 +133,7 @@ func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectSt
 		SenderKey: hex.EncodeToString(reqCtx.SenderKey),
 		ContainerOwner: reqCtx.ContainerOwner,
 		SoftAPECheck: reqCtx.SoftAPECheck,
+		WithoutHeaderRequest: true,
 	})
 	if err != nil {
 		return toStatusErr(err)

@@ -219,6 +220,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
 		SenderKey: hex.EncodeToString(reqCtx.SenderKey),
 		ContainerOwner: reqCtx.ContainerOwner,
 		SoftAPECheck: reqCtx.SoftAPECheck,
+		WithoutHeaderRequest: true,
 	})
 	if err != nil {
 		return nil, toStatusErr(err)
@@ -114,7 +114,7 @@ func (a *assembler) initializeFromSourceObjectID(ctx context.Context, id oid.ID)
 	}
 
 	to := uint64(0)
-	if seekOff+seekLen > a.currentOffset+from {
+	if seekOff+seekLen >= a.currentOffset+from {
 		to = seekOff + seekLen - a.currentOffset
 	}
 
@@ -1,6 +1,7 @@
 package getsvc
 
 import (
+	"bytes"
 	"context"
 	"crypto/ecdsa"
 	"crypto/rand"

@@ -25,6 +26,9 @@ import (
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/stretchr/testify/require"
 )
 

@@ -61,6 +65,9 @@ type testEpochReceiver uint64
 func (e testEpochReceiver) Epoch() (uint64, error) {
 	return uint64(e), nil
 }
+func (e testEpochReceiver) CurrentEpoch() uint64 {
+	return uint64(e)
+}
 
 func newTestStorage() *testStorage {
 	return &testStorage{

@@ -555,21 +562,6 @@ func TestGetRemoteSmall(t *testing.T) {
 		return p
 	}
 
-	newRngPrm := func(raw bool, w ChunkWriter, off, ln uint64) RangePrm {
-		p := RangePrm{}
-		p.SetChunkWriter(w)
-		p.WithRawFlag(raw)
-		p.common = new(util.CommonPrm).WithLocalOnly(false)
-
-		r := objectSDK.NewRange()
-		r.SetOffset(off)
-		r.SetLength(ln)
-
-		p.SetRange(r)
-
-		return p
-	}
-
 	newHeadPrm := func(raw bool, w ObjectWriter) HeadPrm {
 		p := HeadPrm{}
 		p.SetHeaderWriter(w)

@@ -1628,6 +1620,204 @@ func TestGetRemoteSmall(t *testing.T) {
 	})
 }
 
+type testTarget struct {
+	objects []*objectSDK.Object
+}
+
+func (tt *testTarget) WriteObject(_ context.Context, obj *objectSDK.Object) error {
+	tt.objects = append(tt.objects, obj)
+	return nil
+}
+
+func objectChain(t *testing.T, cnr cid.ID, singleSize, totalSize uint64) (oid.ID, []*objectSDK.Object, *objectSDK.Object, []byte) {
+	pk, err := keys.NewPrivateKey()
+	require.NoError(t, err)
+
+	tt := new(testTarget)
+	p := transformer.NewPayloadSizeLimiter(transformer.Params{
+		Key: &pk.PrivateKey,
+		NextTargetInit: func() transformer.ObjectWriter { return tt },
+		NetworkState: testEpochReceiver(1),
+		MaxSize: singleSize,
+	})
+
+	payload := make([]byte, totalSize)
+	_, err = rand.Read(payload)
+	require.NoError(t, err)
+
+	ver := version.Current()
+	hdr := objectSDK.New()
+	hdr.SetContainerID(cnr)
+	hdr.SetType(objectSDK.TypeRegular)
+	hdr.SetVersion(&ver)
+
+	ctx := context.Background()
+	require.NoError(t, p.WriteHeader(ctx, hdr))
+
+	_, err = p.Write(ctx, payload)
+	require.NoError(t, err)
+
+	res, err := p.Close(ctx)
+	require.NoError(t, err)
+
+	if totalSize <= singleSize {
+		// Small object, no linking.
+		require.Len(t, tt.objects, 1)
+		return res.SelfID, tt.objects, nil, payload
+	}
+
+	return *res.ParentID, tt.objects[:len(tt.objects)-1], tt.objects[len(tt.objects)-1], bytes.Clone(payload)
+}
+
+func newRngPrm(raw bool, w ChunkWriter, off, ln uint64) RangePrm {
+	p := RangePrm{}
+	p.SetChunkWriter(w)
+	p.WithRawFlag(raw)
+	p.common = new(util.CommonPrm)
+
+	r := objectSDK.NewRange()
+	r.SetOffset(off)
+	r.SetLength(ln)
+
+	p.SetRange(r)
+	return p
+}
+
+func TestGetRange(t *testing.T) {
+	var cnr container.Container
+	cnr.SetPlacementPolicy(netmaptest.PlacementPolicy())
+
+	var idCnr cid.ID
+	container.CalculateID(&idCnr, cnr)
+
+	ns, as := testNodeMatrix(t, []int{2})
+
+	testGetRange := func(t *testing.T, svc *Service, addr oid.Address, from, to uint64, payload []byte) {
+		w := NewSimpleObjectWriter()
+		rngPrm := newRngPrm(false, w, from, to-from)
+		rngPrm.WithAddress(addr)
+
+		err := svc.GetRange(context.Background(), rngPrm)
+		require.NoError(t, err)
+		if from == to {
+			require.Nil(t, w.Object().Payload())
+		} else {
+			require.Equal(t, payload[from:to], w.Object().Payload())
+		}
+	}
+
+	newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
+		const curEpoch = 13
+
+		return &Service{
+			log: test.NewLogger(t),
+			localStorage: newTestStorage(),
+			traverserGenerator: &testTraverserGenerator{
+				c: cnr,
+				b: map[uint64]placement.Builder{
+					curEpoch: b,
+				},
+			},
+			epochSource: testEpochReceiver(curEpoch),
+			remoteStorageConstructor: c,
+			keyStore: &testKeyStorage{},
+		}
+	}
+
+	t.Run("small", func(t *testing.T) {
+		const totalSize = 5
+		_, objs, _, payload := objectChain(t, idCnr, totalSize, totalSize)
+		require.Len(t, objs, 1)
+		require.Len(t, payload, totalSize)
+
+		obj := objs[0]
+		addr := object.AddressOf(obj)
+		builder := &testPlacementBuilder{vectors: map[string][][]netmap.NodeInfo{addr.EncodeToString(): ns}}
+
+		c1 := newTestClient()
+		c1.addResult(addr, obj, nil)
+
+		svc := newSvc(builder, &testClientCache{
+			clients: map[string]*testClient{
+				as[0][0]: c1,
+				as[0][1]: c1,
+			},
+		})
+
+		for from := 0; from < totalSize-1; from++ {
+			for to := from; to < totalSize; to++ {
+				t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
+					testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
+				})
+			}
+		}
+
+	})
+	t.Run("big", func(t *testing.T) {
+		const totalSize = 9
+		id, objs, link, payload := objectChain(t, idCnr, 3, totalSize) // 3 parts
+		require.Equal(t, totalSize, len(payload))
+
+		builder := &testPlacementBuilder{vectors: map[string][][]netmap.NodeInfo{}}
+		builder.vectors[idCnr.EncodeToString()+"/"+id.EncodeToString()] = ns
+		builder.vectors[object.AddressOf(link).EncodeToString()] = ns
+		for i := range objs {
+			builder.vectors[object.AddressOf(objs[i]).EncodeToString()] = ns
+		}
+
+		var addr oid.Address
+		addr.SetContainer(idCnr)
+		addr.SetObject(id)
+
+		const (
+			linkingLast = "splitinfo=last"
+			linkingChildren = "splitinfo=children"
+			linkingBoth = "splitinfo=both"
+		)
+
+		lastID, _ := objs[len(objs)-1].ID()
+		linkID, _ := link.ID()
+
+		for _, kind := range []string{linkingLast, linkingChildren, linkingBoth} {
+			t.Run(kind, func(t *testing.T) {
+				c1 := newTestClient()
+				for i := range objs {
+					c1.addResult(object.AddressOf(objs[i]), objs[i], nil)
+				}
+
+				c1.addResult(object.AddressOf(link), link, nil)
+
+				si := objectSDK.NewSplitInfo()
+				switch kind {
+				case linkingLast:
+					si.SetLastPart(lastID)
+				case linkingChildren:
+					si.SetLink(linkID)
+				case linkingBoth:
+					si.SetLastPart(lastID)
+					si.SetLink(linkID)
+				}
+				c1.addResult(addr, nil, objectSDK.NewSplitInfoError(si))
+
+				svc := newSvc(builder, &testClientCache{
+					clients: map[string]*testClient{
+						as[0][0]: c1,
+						as[0][1]: c1,
+					},
+				})
+
+				for from := 0; from < totalSize-1; from++ {
+					for to := from; to < totalSize; to++ {
+						t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
+							testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
+						})
+					}
+				}
+			})
+		}
+	})
+}
+
 func TestGetFromPastEpoch(t *testing.T) {
 	ctx := context.Background()
 
@@ -348,7 +348,7 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e
 		ln = maxInitialBufferSize
 	}
 
-	w := bytes.NewBuffer(make([]byte, ln))
+	w := bytes.NewBuffer(make([]byte, 0, ln))
 	_, err = io.CopyN(w, rdr, int64(prm.ln))
 	if err != nil {
 		return nil, fmt.Errorf("read payload: %w", err)
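The fix above matters because bytes.NewBuffer treats the slice it receives as existing contents rather than spare capacity, so make([]byte, ln) would prepend ln zero bytes to the copied payload. A standalone illustration (not part of the diff):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// The slice passed to NewBuffer is the buffer's initial content:
	// these 4 zero bytes end up before anything written afterwards.
	wrong := bytes.NewBuffer(make([]byte, 4))
	wrong.WriteString("data")
	fmt.Printf("%q\n", wrong.String()) // "\x00\x00\x00\x00data"

	// Zero length with preallocated capacity keeps the buffer empty
	// while still avoiding reallocations for up to 4 bytes.
	right := bytes.NewBuffer(make([]byte, 0, 4))
	right.WriteString("data")
	fmt.Printf("%q\n", right.String()) // "data"
}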
@@ -130,7 +130,7 @@ func TestGetSubTreeOrderAsc(t *testing.T) {
 
 	t.Run("boltdb forest", func(t *testing.T) {
 		p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")))
-		require.NoError(t, p.Open(context.Background(), 0644))
+		require.NoError(t, p.Open(context.Background(), 0o644))
 		require.NoError(t, p.Init())
 		testGetSubTreeOrderAsc(t, p)
 	})

@@ -446,7 +446,7 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
 	type stackItem struct {
 		values []pilorama.NodeInfo
 		parent pilorama.Node
-		last string
+		last *string
 	}
 
 	// Traverse the tree in a DFS manner. Because we need to support arbitrary depth,

@@ -502,7 +502,7 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
 	}
 
 	if b.GetDepth() == 0 || uint32(len(stack)) < b.GetDepth() {
-		children, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), node.ID, "", batchSize)
+		children, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), node.ID, nil, batchSize)
 		if err != nil {
 			return err
 		}