Compare commits: master...feat/get-p
2 commits: efe1143ce2, ca816de8e9
34 changed files with 99 additions and 344 deletions

The changes are shown below as unified hunks: lines prefixed with "-" come from master, lines prefixed with "+" come from feat/get-p, and unprefixed lines are unchanged context.
@@ -87,7 +87,5 @@ linters:
     - perfsprint
     - testifylint
     - protogetter
-    - intrange
-    - tenv
   disable-all: true
   fast: false
.woodpecker/pre-commit.yml (new file, 11 additions)
@@ -0,0 +1,11 @@
+pipeline:
+  # Kludge for non-root containers under WoodPecker
+  fix-ownership:
+    image: alpine:latest
+    commands: chown -R 1234:1234 .
+
+  pre-commit:
+    image: git.frostfs.info/truecloudlab/frostfs-ci:v0.36
+    commands:
+      - export HOME="$(getent passwd $(id -u) | cut '-d:' -f6)"
+      - pre-commit run --hook-stage manual
Makefile (2 changes)
@@ -8,7 +8,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

 GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.61.0
+LINT_VERSION ?= 1.60.3
 TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
 PROTOC_VERSION ?= 25.0
 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
@@ -128,7 +128,7 @@ func generateConfigExample(appDir string, credSize int) (string, error) {
     tmpl.AlphabetDir = filepath.Join(appDir, "alphabet-wallets")

     var i innerring.GlagoliticLetter
-    for i = range innerring.GlagoliticLetter(credSize) {
+    for i = 0; i < innerring.GlagoliticLetter(credSize); i++ {
         tmpl.Glagolitics = append(tmpl.Glagolitics, i.String())
     }

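Most of the Go hunks that follow repeat one mechanical change: a Go 1.22 "range over an integer" loop on the master side becomes an equivalent three-clause loop on the feat/get-p side. A minimal, self-contained sketch of the two forms (illustrative code, not taken from the repository):

package main

import "fmt"

func main() {
    const credSize = 3

    // Go 1.22+ form: range over an integer yields 0, 1, ..., credSize-1.
    for i := range credSize {
        fmt.Println("range form:", i)
    }

    // Classic three-clause form: visits exactly the same values, but does not
    // depend on the Go 1.22 loop syntax.
    for i := 0; i < credSize; i++ {
        fmt.Println("classic form:", i)
    }
}

Where the counter has a non-int type in the hunks below (uint, uint32, uint64), the three-clause form spells the starting value explicitly, e.g. for i := uint64(0); i < n; i++.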
@@ -63,7 +63,7 @@ func TestGenerateAlphabet(t *testing.T) {
     buf.Reset()
     v.Set(commonflags.AlphabetWalletsFlag, walletDir)
     require.NoError(t, GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10)))
-    for i := range uint64(size) {
+    for i := uint64(0); i < size; i++ {
         buf.WriteString(strconv.FormatUint(i, 10) + "\r")
     }

@@ -659,7 +659,9 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes

     for {
         n, ok = rdr.Read(buf)
-        list = append(list, buf[:n]...)
+        for i := range n {
+            list = append(list, buf[i])
+        }
         if !ok {
             break
         }
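The bulk append and the per-element loop in this hunk accumulate the same bytes; a small illustrative sketch (values invented for the example):

package main

import "fmt"

func main() {
    buf := []byte{1, 2, 3, 4}
    n := 3

    // Bulk form: append the first n elements in one call.
    a := append([]byte(nil), buf[:n]...)

    // Per-element form: same contents, one element per iteration.
    // The range-over-int loop requires Go 1.22.
    var b []byte
    for i := range n {
        b = append(b, buf[i])
    }

    fmt.Println(a, b) // [1 2 3] [1 2 3]
}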
@@ -195,7 +195,7 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member
     prmHead.SetRawFlag(true) // to get an error instead of whole object

     eg, egCtx := errgroup.WithContext(cmd.Context())
-    for idx := range members {
+    for idx := range len(members) {
         partObjID := members[idx]

         eg.Go(func() error {
@@ -114,15 +114,13 @@ func initConfig() {
     } else {
         // Find home directory.
         home, err := homedir.Dir()
-        if err != nil {
-            common.PrintVerbose(rootCmd, "Get homedir: %s", err)
-        } else {
+        commonCmd.ExitOnErr(rootCmd, "", err)
         // Search config in `$HOME/.config/frostfs-cli/` with name "config.yaml"
         viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli"))
         viper.SetConfigName("config")
         viper.SetConfigType("yaml")
     }
-    }

     viper.SetEnvPrefix(envPrefix)
     viper.AutomaticEnv() // read in environment variables that match
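Both sides of this hunk point viper at a per-user config directory; only the homedir error handling changes. A standalone sketch of that lookup, with simplified error handling that is mine and not the CLI's actual flow:

package main

import (
    "fmt"
    "path/filepath"

    "github.com/mitchellh/go-homedir"
    "github.com/spf13/viper"
)

func main() {
    home, err := homedir.Dir()
    if err != nil {
        fmt.Println("cannot resolve home directory:", err)
        return
    }

    // Look for $HOME/.config/frostfs-cli/config.yaml.
    viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli"))
    viper.SetConfigName("config")
    viper.SetConfigType("yaml")

    if err := viper.ReadInConfig(); err != nil {
        fmt.Println("no config file loaded:", err)
    }
}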
@@ -11,7 +11,7 @@ func DecodeOIDs(data []byte) ([]oid.ID, error) {
     size := r.ReadVarUint()
     oids := make([]oid.ID, size)

-    for i := range size {
+    for i := uint64(0); i < size; i++ {
         if err := oids[i].Decode(r.ReadVarBytes()); err != nil {
             return nil, err
         }
@@ -1,6 +1,7 @@
 package config_test

 import (
+    "os"
     "strings"
     "testing"

@@ -37,7 +38,8 @@ func TestConfigEnv(t *testing.T) {

     envName := strings.ToUpper(
         strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator))
-    t.Setenv(envName, value)
+    err := os.Setenv(envName, value)
+    require.NoError(t, err)

     c := configtest.EmptyConfig()

@@ -11,6 +11,8 @@ import (
 )

 func fromFile(path string) *config.Config {
+    os.Clearenv() // ENVs have priority over config files, so we do this in tests
+
     return config.New(path, "", "")
 }

@@ -38,6 +40,15 @@ func ForEachFileType(pref string, f func(*config.Config)) {

 // ForEnvFileType creates config from `<pref>.env` file.
 func ForEnvFileType(t testing.TB, pref string, f func(*config.Config)) {
+    envs := os.Environ()
+    t.Cleanup(func() {
+        os.Clearenv()
+        for _, env := range envs {
+            keyValue := strings.Split(env, "=")
+            os.Setenv(keyValue[0], keyValue[1])
+        }
+    })
+
     f(fromEnvFile(t, pref+".env"))
 }

@@ -62,6 +73,7 @@ func loadEnv(t testing.TB, path string) {

         v = strings.Trim(v, `"`)

-        t.Setenv(k, v)
+        err = os.Setenv(k, v)
+        require.NoError(t, err, "can't set environment variable")
     }
 }
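These test helpers replace t.Setenv with plain os.Setenv plus an explicit snapshot-and-restore of the whole environment. A self-contained sketch of that pattern (the helper name and the SplitN call are mine, not the repository's):

package envtest

import (
    "os"
    "strings"
    "testing"
)

// saveEnv snapshots the current environment and restores it when the test
// finishes, mirroring the t.Cleanup block added in ForEnvFileType above.
func saveEnv(t testing.TB) {
    envs := os.Environ()
    t.Cleanup(func() {
        os.Clearenv()
        for _, env := range envs {
            // SplitN keeps values that themselves contain "=".
            kv := strings.SplitN(env, "=", 2)
            os.Setenv(kv[0], kv[1])
        }
    })
}

func TestExample(t *testing.T) {
    saveEnv(t)
    os.Setenv("EXAMPLE_KEY", "value") // visible only for the rest of this test
    if os.Getenv("EXAMPLE_KEY") != "value" {
        t.Fatal("unexpected value")
    }
}

Unlike t.Setenv, which restores only the variables it set, the snapshot restores the entire environment, which matters when tests also call os.Clearenv.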
@@ -8,7 +8,6 @@ import (
     containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
     morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
     "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
     containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
     frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
     cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"

@@ -43,7 +42,7 @@ func initContainerService(_ context.Context, c *cfg) {

     cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
     if cacheSize > 0 {
-        frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
+        frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL)
     }

     c.shared.frostfsidClient = frostfsIDSubjectProvider
@@ -1,7 +1,6 @@
 package main

 import (
-    "strings"
     "time"

     "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"

@@ -10,99 +9,55 @@ import (
     "github.com/nspcc-dev/neo-go/pkg/util"
 )

-type subjectWithError struct {
-    subject *client.Subject
-    err error
-}
-
-type subjectExtWithError struct {
-    subject *client.SubjectExtended
-    err error
-}
-
 type morphFrostfsIDCache struct {
     subjProvider frostfsidcore.SubjectProvider

-    subjCache *expirable.LRU[util.Uint160, subjectWithError]
+    subjCache *expirable.LRU[util.Uint160, *client.Subject]

-    subjExtCache *expirable.LRU[util.Uint160, subjectExtWithError]
-
-    metrics cacheMetrics
+    subjExtCache *expirable.LRU[util.Uint160, *client.SubjectExtended]
 }

-func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration, metrics cacheMetrics) frostfsidcore.SubjectProvider {
+func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration) frostfsidcore.SubjectProvider {
     return &morphFrostfsIDCache{
         subjProvider: subjProvider,

-        subjCache: expirable.NewLRU(size, func(util.Uint160, subjectWithError) {}, ttl),
+        subjCache: expirable.NewLRU(size, func(util.Uint160, *client.Subject) {}, ttl),

-        subjExtCache: expirable.NewLRU(size, func(util.Uint160, subjectExtWithError) {}, ttl),
-
-        metrics: metrics,
+        subjExtCache: expirable.NewLRU(size, func(util.Uint160, *client.SubjectExtended) {}, ttl),
     }
 }

 func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) {
-    hit := false
-    startedAt := time.Now()
-    defer func() {
-        m.metrics.AddMethodDuration("GetSubject", time.Since(startedAt), hit)
-    }()
-
     result, found := m.subjCache.Get(addr)
     if found {
-        hit = true
-        return result.subject, result.err
+        return result, nil
     }

-    subj, err := m.subjProvider.GetSubject(addr)
+    result, err := m.subjProvider.GetSubject(addr)
     if err != nil {
-        if m.isCacheableError(err) {
-            m.subjCache.Add(addr, subjectWithError{
-                err: err,
-            })
-        }
         return nil, err
     }

-    m.subjCache.Add(addr, subjectWithError{subject: subj})
-    return subj, nil
+    m.subjCache.Add(addr, result)
+    return result, nil
 }

 func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
-    hit := false
-    startedAt := time.Now()
-    defer func() {
-        m.metrics.AddMethodDuration("GetSubjectExtended", time.Since(startedAt), hit)
-    }()
-
-    result, found := m.subjExtCache.Get(addr)
+    subjExt, found := m.subjExtCache.Get(addr)
     if found {
-        hit = true
-        return result.subject, result.err
+        return subjExt, nil
     }

-    subjExt, err := m.subjProvider.GetSubjectExtended(addr)
+    var err error
+    subjExt, err = m.subjProvider.GetSubjectExtended(addr)
     if err != nil {
-        if m.isCacheableError(err) {
-            m.subjExtCache.Add(addr, subjectExtWithError{
-                err: err,
-            })
-            m.subjCache.Add(addr, subjectWithError{
-                err: err,
-            })
-        }
         return nil, err
     }

-    m.subjExtCache.Add(addr, subjectExtWithError{subject: subjExt})
-    m.subjCache.Add(addr, subjectWithError{subject: subjectFromSubjectExtended(subjExt)})
+    m.subjExtCache.Add(addr, subjExt)
+    m.subjCache.Add(addr, subjectFromSubjectExtended(subjExt))

     return subjExt, nil
 }
-
-func (m *morphFrostfsIDCache) isCacheableError(err error) bool {
-    return strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage)
-}

 func subjectFromSubjectExtended(subjExt *client.SubjectExtended) *client.Subject {
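Both versions of this cache sit on the expirable LRU from hashicorp/golang-lru/v2. A small, self-contained sketch of that API and of the two value shapes seen in the hunk: caching plain values versus caching a value-plus-error wrapper so that lookup failures are cached too. Names here are illustrative, not frostfs-node code.

package main

import (
    "fmt"
    "time"

    "github.com/hashicorp/golang-lru/v2/expirable"
)

// resultWithError mirrors the subjectWithError idea on the master side:
// a cached entry can carry either a value or the error the lookup produced.
type resultWithError struct {
    value string
    err   error
}

func main() {
    // Plain values only (the feat/get-p shape): 128 entries, 1-minute TTL,
    // no eviction callback.
    plain := expirable.NewLRU[string, string](128, nil, time.Minute)
    plain.Add("key", "value")

    // Value-or-error entries (the master shape): negative results stay cached
    // until the TTL expires.
    withErr := expirable.NewLRU[string, resultWithError](128, nil, time.Minute)
    withErr.Add("missing", resultWithError{err: fmt.Errorf("not found")})

    if v, ok := plain.Get("key"); ok {
        fmt.Println(v)
    }
    if r, ok := withErr.Get("missing"); ok {
        fmt.Println(r.value, r.err)
    }
}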
go.mod (2 changes)
@@ -19,7 +19,6 @@ require (
     github.com/cheggaaa/pb v1.0.29
     github.com/chzyer/readline v1.5.1
     github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
-    github.com/felixge/fgprof v0.9.5
     github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
     github.com/gdamore/tcell/v2 v2.7.4
     github.com/go-pkgz/expirable-cache/v3 v3.0.0
@@ -78,7 +77,6 @@ require (
     github.com/go-logr/logr v1.4.2 // indirect
     github.com/go-logr/stdr v1.2.2 // indirect
     github.com/golang/snappy v0.0.4 // indirect
-    github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
     github.com/gorilla/websocket v1.5.1 // indirect
     github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect
     github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect
go.sum: binary file not shown
@@ -61,7 +61,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
         require.NoError(t, b.Init())

         storageIDs := make(map[oid.Address][]byte)
-        for range 100 {
+        for i := 0; i < 100; i++ {
             obj := blobstortest.NewObject(64 * 1024) // 64KB object
             data, err := obj.Marshal()
             require.NoError(t, err)
@@ -168,7 +168,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {

         storageIDs := make(map[oid.Address][]byte)
         toDelete := make(map[oid.Address][]byte)
-        for i := range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
+        for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
             obj := blobstortest.NewObject(64 * 1024)
             data, err := obj.Marshal()
             require.NoError(t, err)
@@ -236,7 +236,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
         require.NoError(t, b.Init())

         storageIDs := make(map[oid.Address][]byte)
-        for range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
+        for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
             obj := blobstortest.NewObject(64 * 1024)
             data, err := obj.Marshal()
             require.NoError(t, err)
@@ -47,7 +47,7 @@ func TestIterateObjects(t *testing.T) {

     mObjs := make(map[string]addrData)

-    for i := range uint64(objNum) {
+    for i := uint64(0); i < objNum; i++ {
         sz := smalSz

         big := i < objNum/2
@@ -151,7 +151,7 @@ func TestErrorReporting(t *testing.T) {
         checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
     }

-    for i := range uint32(2) {
+    for i := uint32(0); i < 2; i++ {
         _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
         require.Error(t, err)
         checkShardState(t, te.ng, te.shards[0].id, errThreshold+i, mode.ReadOnly)
@@ -705,7 +705,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M
         key, value = c.Prev()
     }

-    for i := range ms {
+    for i := range len(ms) {
         // Loop invariant: key represents the next stored timestamp after ms[i].Time.

         // 2. Insert the operation.
@@ -1081,7 +1081,7 @@ func prepareRandomTree(nodeCount, opCount int) []Move {
 }

 func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) {
-    for i := range uint64(nodeCount) {
+    for i := uint64(0); i < uint64(nodeCount); i++ {
         expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i)
         require.NoError(t, err)
         actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i)
@@ -216,7 +216,7 @@ func TestRefillMetabase(t *testing.T) {
     locked := make([]oid.ID, 1, 2)
     locked[0] = oidtest.ID()
     cnrLocked := cidtest.ID()
-    for range objNum {
+    for i := uint64(0); i < objNum; i++ {
         obj := objecttest.Object()
         obj.SetType(objectSDK.TypeRegular)

@@ -14,7 +14,7 @@ func TestLimiter(t *testing.T) {
     l := newFlushLimiter(uint64(maxSize))
     var currSize atomic.Int64
     var eg errgroup.Group
-    for range 10_000 {
+    for i := 0; i < 10_000; i++ {
         eg.Go(func() error {
             defer l.release(single)
             defer currSize.Add(-1)
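Several of the touched tests and helpers (the flush-limiter test above, the EC member flattening, the populate tool below) lean on golang.org/x/sync/errgroup. A minimal sketch of that pattern, unrelated to any specific test here: spawn N goroutines, keep the first error, wait for all of them.

package main

import (
    "fmt"
    "sync/atomic"

    "golang.org/x/sync/errgroup"
)

func main() {
    var eg errgroup.Group
    var counter atomic.Int64

    for i := 0; i < 10; i++ {
        eg.Go(func() error {
            counter.Add(1) // simulated work
            return nil     // returning an error here would be reported by Wait
        })
    }

    if err := eg.Wait(); err != nil {
        fmt.Println("error:", err)
        return
    }
    fmt.Println("done:", counter.Load())
}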
@@ -4,11 +4,11 @@ import (
     "context"
     "errors"
     "fmt"
-    "net"
     "time"

     "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
     "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+    internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
     morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
     lru "github.com/hashicorp/golang-lru/v2"

@@ -48,7 +48,7 @@ type cfg struct {

     morphCacheMetrics metrics.MorphCacheMetrics

-    dialerSource DialerSource
+    dialerSource *internalNet.DialerSource
 }

 const (

@@ -68,7 +68,6 @@ func defaultConfig() *cfg {
             Scopes: transaction.Global,
         },
         morphCacheMetrics: &morphmetrics.NoopMorphCacheMetrics{},
-        dialerSource: &noopDialerSource{},
     }
 }

@@ -297,17 +296,7 @@ func WithMorphCacheMetrics(morphCacheMetrics metrics.MorphCacheMetrics) Option {
     }
 }

-type DialerSource interface {
-    NetContextDialer() func(context.Context, string, string) (net.Conn, error)
-}
-
-type noopDialerSource struct{}
-
-func (ds *noopDialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
-    return nil
-}
-
-func WithDialerSource(ds DialerSource) Option {
+func WithDialerSource(ds *internalNet.DialerSource) Option {
     return func(c *cfg) {
         c.dialerSource = ds
     }
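The last hunk here removes a package-local DialerSource interface and its no-op default in favour of the concrete *internalNet.DialerSource type. A compact, self-contained sketch of the removed pattern, a functional option over a small interface with a no-op default; the interface and option names follow the removed code, while the surrounding cfg/Option types are simplified stand-ins:

package main

import (
    "context"
    "fmt"
    "net"
)

type DialerSource interface {
    NetContextDialer() func(context.Context, string, string) (net.Conn, error)
}

type noopDialerSource struct{}

func (noopDialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
    return nil // the removed no-op implementation returned nil, presumably meaning "no custom dialer"
}

type cfg struct{ dialerSource DialerSource }

type Option func(*cfg)

func WithDialerSource(ds DialerSource) Option {
    return func(c *cfg) { c.dialerSource = ds }
}

func main() {
    c := &cfg{dialerSource: noopDialerSource{}} // usable default without any option
    WithDialerSource(noopDialerSource{})(c)     // an option can swap in a real source
    fmt.Println(c.dialerSource.NetContextDialer() == nil)
}

Switching to the concrete *internalNet.DialerSource trades this indirection for a direct dependency on the internal/net package, as the import hunk above shows.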
@@ -8,7 +8,7 @@ import (
 )

 func tickN(t *timer.BlockTimer, n uint32) {
-    for range n {
+    for i := uint32(0); i < n; i++ {
         t.Tick(0)
     }
 }
@@ -537,7 +537,10 @@ func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercor
         return false, err
     }

-    if isContainerNode(nm, pk, binCnrID, cont) {
+    in, err := isContainerNode(nm, pk, binCnrID, cont)
+    if err != nil {
+        return false, err
+    } else if in {
         return true, nil
     }

@@ -548,24 +551,24 @@ func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercor
         return false, err
     }

-    return isContainerNode(nm, pk, binCnrID, cont), nil
+    return isContainerNode(nm, pk, binCnrID, cont)
 }

-func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool {
-    // It could an error only if the network map doesn't have enough nodes to
-    // fulfil the policy. It's a logical error that doesn't affect an actor role
-    // determining, so we ignore it
-    cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
+func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) (bool, error) {
+    cnrVectors, err := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
+    if err != nil {
+        return false, err
+    }

     for i := range cnrVectors {
         for j := range cnrVectors[i] {
             if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) {
-                return true
+                return true, nil
             }
         }
     }

-    return false
+    return false, nil
 }

 func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) {
@@ -50,7 +50,7 @@ func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token)
     metaHeader.SetBearerToken(b)
     metaHeader.SetSessionToken(s)

-    for range depth {
+    for i := uint32(0); i < depth; i++ {
         link := metaHeader
         metaHeader = new(session.RequestMetaHeader)
         metaHeader.SetOrigin(link)
@@ -284,7 +284,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
     }

     // try to save to any node not visited by current part
-    for i := range nodes {
+    for i := range len(nodes) {
         select {
         case <-ctx.Done():
             return ctx.Err()
@@ -116,6 +116,7 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
         Config: s.Config,
         Common: commonPrm,
         Header: objectSDK.NewFromV2(oV2),
+        SignRequestPrivateKey: s.localNodeKey,
     })
     if err != nil {
         return fmt.Errorf("target creation: %w", err)
Deleted file (package tree, APE check test):
@@ -1,207 +0,0 @@
-package tree
-
-import (
-    "context"
-    "encoding/hex"
-    "fmt"
-    "testing"
-
-    "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
-    core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
-    frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
-    checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
-    containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
-    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-    "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
-    "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
-    "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
-    nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
-    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-    "github.com/nspcc-dev/neo-go/pkg/util"
-    "github.com/stretchr/testify/require"
-)
-
-var (
-    containerID = "73tQMTYyUkTgmvPR1HWib6pndbhSoBovbnMF7Pws8Rcy"
-
-    senderPrivateKey, _ = keys.NewPrivateKey()
-
-    senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes())
-
-    rootCnr = &core.Container{Value: containerSDK.Container{}}
-)
-
-type frostfsIDProviderMock struct {
-    subjects map[util.Uint160]*client.Subject
-    subjectsExtended map[util.Uint160]*client.SubjectExtended
-}
-
-func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) {
-    v, ok := f.subjects[key]
-    if !ok {
-        return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
-    }
-    return v, nil
-}
-
-func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) {
-    v, ok := f.subjectsExtended[key]
-    if !ok {
-        return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage)
-    }
-    return v, nil
-}
-
-var _ frostfsidcore.SubjectProvider = (*frostfsIDProviderMock)(nil)
-
-func newFrostfsIDProviderMock(t *testing.T) *frostfsIDProviderMock {
-    return &frostfsIDProviderMock{
-        subjects: map[util.Uint160]*client.Subject{
-            scriptHashFromSenderKey(t, senderKey): {
-                Namespace: "testnamespace",
-                Name: "test",
-                KV: map[string]string{
-                    "tag-attr1": "value1",
-                    "tag-attr2": "value2",
-                },
-            },
-        },
-        subjectsExtended: map[util.Uint160]*client.SubjectExtended{
-            scriptHashFromSenderKey(t, senderKey): {
-                Namespace: "testnamespace",
-                Name: "test",
-                KV: map[string]string{
-                    "tag-attr1": "value1",
-                    "tag-attr2": "value2",
-                },
-                Groups: []*client.Group{
-                    {
-                        ID: 1,
-                        Name: "test",
-                        Namespace: "testnamespace",
-                        KV: map[string]string{
-                            "attr1": "value1",
-                            "attr2": "value2",
-                        },
-                    },
-                },
-            },
-        },
-    }
-}
-
-func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 {
-    pk, err := keys.NewPublicKeyFromString(senderKey)
-    require.NoError(t, err)
-    return pk.GetScriptHash()
-}
-
-type stMock struct{}
-
-func (m *stMock) CurrentEpoch() uint64 {
-    return 8
-}
-
-func TestCheckAPE(t *testing.T) {
-    cid := cid.ID{}
-    _ = cid.DecodeString(containerID)
-
-    t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) {
-        los := inmemory.NewInmemoryLocalStorage()
-        mcs := inmemory.NewInmemoryMorphRuleChainStorage()
-        fid := newFrostfsIDProviderMock(t)
-        s := Service{
-            cfg: cfg{
-                frostfsidSubjectProvider: fid,
-            },
-            apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
-        }
-
-        los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
-            Rules: []chain.Rule{
-                {
-                    Status: chain.AccessDenied,
-                    Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
-                    Resources: chain.Resources{
-                        Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
-                    },
-                    Condition: []chain.Condition{
-                        {
-                            Op: chain.CondStringNotEquals,
-                            Kind: chain.KindResource,
-                            Key: nativeschema.PropertyKeyObjectType,
-                            Value: "TOMBSTONE",
-                        },
-                    },
-                },
-            },
-            MatchType: chain.MatchTypeFirstMatch,
-        })
-
-        mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
-            Rules: []chain.Rule{
-                {
-                    Status: chain.Allow,
-                    Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}},
-                    Resources: chain.Resources{
-                        Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
-                    },
-                },
-            },
-            MatchType: chain.MatchTypeFirstMatch,
-        })
-
-        err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey())
-        require.NoError(t, err)
-    })
-
-    t.Run("delete rule won't affect tree add", func(t *testing.T) {
-        los := inmemory.NewInmemoryLocalStorage()
-        mcs := inmemory.NewInmemoryMorphRuleChainStorage()
-        fid := newFrostfsIDProviderMock(t)
-        s := Service{
-            cfg: cfg{
-                frostfsidSubjectProvider: fid,
-            },
-            apeChecker: checkercore.New(los, mcs, fid, &stMock{}),
-        }
-
-        los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
-            Rules: []chain.Rule{
-                {
-                    Status: chain.AccessDenied,
-                    Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}},
-                    Resources: chain.Resources{
-                        Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
-                    },
-                },
-            },
-            MatchType: chain.MatchTypeFirstMatch,
-        })
-
-        mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{
-            Rules: []chain.Rule{
-                {
-                    Status: chain.Allow,
-                    Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}},
-                    Resources: chain.Resources{
-                        Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)},
-                    },
-                    Condition: []chain.Condition{
-                        {
-                            Op: chain.CondStringNotEquals,
-                            Kind: chain.KindResource,
-                            Key: nativeschema.PropertyKeyObjectType,
-                            Value: "TOMBSTONE",
-                        },
-                    },
-                },
-            },
-            MatchType: chain.MatchTypeFirstMatch,
-        })
-
-        err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey())
-        require.NoError(t, err)
-    })
-}
@@ -210,7 +210,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
         return nil, err
     }

-    err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectDelete)
+    err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut)
     if err != nil {
         return nil, err
     }
@@ -3,14 +3,8 @@ package httputil
 import (
     "net/http"
     "net/http/pprof"
-
-    "github.com/felixge/fgprof"
 )

-func init() {
-    http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler())
-}
-
 // initializes pprof package in order to
 // register Prometheus handlers on http.DefaultServeMux.
 var _ = pprof.Handler("")
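With the fgprof import and the init gone, only the standard net/http/pprof endpoints stay registered on http.DefaultServeMux. A minimal sketch of serving them; the listen address is an arbitrary example, not taken from the node's configuration:

package main

import (
    "log"
    "net/http"
    _ "net/http/pprof" // registers /debug/pprof/* on http.DefaultServeMux
)

func main() {
    // Serves the default mux, so /debug/pprof/ is reachable; before this
    // change the removed init() also exposed /debug/fgprof here.
    log.Println(http.ListenAndServe("localhost:6060", nil))
}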
@@ -19,7 +19,7 @@ import (

 func GeneratePayloadPool(count uint, size uint) [][]byte {
     var pool [][]byte
-    for range count {
+    for i := uint(0); i < count; i++ {
         payload := make([]byte, size)
         _, _ = rand.Read(payload)

@@ -30,8 +30,8 @@ func GeneratePayloadPool(count uint, size uint) [][]byte {

 func GenerateAttributePool(count uint) []objectSDK.Attribute {
     var pool []objectSDK.Attribute
-    for i := range count {
-        for j := range count {
+    for i := uint(0); i < count; i++ {
+        for j := uint(0); j < count; j++ {
             attr := *objectSDK.NewAttribute()
             attr.SetKey(fmt.Sprintf("key%d", i))
             attr.SetValue(fmt.Sprintf("value%d", j))
@@ -43,7 +43,7 @@ func GenerateAttributePool(count uint) []objectSDK.Attribute {

 func GenerateOwnerPool(count uint) []user.ID {
     var pool []user.ID
-    for range count {
+    for i := uint(0); i < count; i++ {
         pool = append(pool, usertest.ID())
     }
     return pool
@@ -118,7 +118,7 @@ func WithPayloadFromPool(pool [][]byte) ObjectOption {
 func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption {
     return func(obj *objectSDK.Object) {
         var attrs []objectSDK.Attribute
-        for range count {
+        for i := uint(0); i < count; i++ {
             attrs = append(attrs, pool[rand.Intn(len(pool))])
         }
         obj.SetAttributes(attrs...)
@@ -29,7 +29,7 @@ func PopulateWithObjects(
 ) {
     digits := "0123456789"

-    for range count {
+    for i := uint(0); i < count; i++ {
         obj := factory()

         id := []byte(fmt.Sprintf(
@@ -59,7 +59,7 @@ func PopulateWithBigObjects(
     count uint,
     factory func() *objectSDK.Object,
 ) {
-    for range count {
+    for i := uint(0); i < count; i++ {
         group.Go(func() error {
             if err := populateWithBigObject(ctx, db, factory); err != nil {
                 return fmt.Errorf("couldn't put a big object: %w", err)
@@ -154,7 +154,7 @@ func PopulateGraveyard(
     wg := &sync.WaitGroup{}
     wg.Add(int(count))

-    for range count {
+    for i := uint(0); i < count; i++ {
         obj := factory()

         prm := meta.PutPrm{}
@@ -226,7 +226,7 @@ func PopulateLocked(
     wg := &sync.WaitGroup{}
     wg.Add(int(count))

-    for range count {
+    for i := uint(0); i < count; i++ {
         defer wg.Done()

         obj := factory()
@@ -116,7 +116,7 @@ func populate() (err error) {
     eg, ctx := errgroup.WithContext(ctx)
     eg.SetLimit(int(jobs))

-    for range numContainers {
+    for i := uint(0); i < numContainers; i++ {
         cid := cidtest.ID()

         for _, typ := range types {