Compare commits


1 commit

Commit b464c4c3cf: Use netmap in tree pool requests
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
Date: 2024-09-04 15:41:46 +03:00
52 changed files with 332 additions and 1061 deletions

.gitignore (vendored): 2 lines changed

@ -23,7 +23,7 @@ coverage.txt
coverage.html
# antlr tool jar
antlr*.jar
antlr-*.jar
# tempfiles
.cache


@ -63,6 +63,5 @@ linters:
- funlen
- gocognit
- contextcheck
- protogetter
disable-all: true
fast: false


@ -1,6 +1,6 @@
#!/usr/bin/make -f
ANTLR_VERSION=4.13.1
ANTLR_VERSION="4.13.0"
TMP_DIR := .cache
LINT_VERSION ?= 1.60.1
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
@ -53,8 +53,7 @@ format:
policy:
@wget -q https://www.antlr.org/download/antlr-${ANTLR_VERSION}-complete.jar -O antlr4-tool.jar
@java -Xmx500M -cp antlr4-tool.jar org.antlr.v4.Tool -Dlanguage=Go \
-no-listener -visitor netmap/parser/Query.g4 netmap/parser/QueryLexer.g4
@java -Xmx500M -cp "`pwd`/antlr4-tool.jar" "org.antlr.v4.Tool" -o `pwd`/netmap/parser/ -Dlanguage=Go -no-listener -visitor `pwd`/netmap/parser/Query.g4 `pwd`/netmap/parser/QueryLexer.g4
# Run `make %` in truecloudlab/frostfs-sdk-go container(Golang+Java)
docker/%:
@ -78,15 +77,3 @@ help:
@echo ' Targets:'
@echo ''
@awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9_-]+ ?:/{ print " ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort -u
# Activate pre-commit hooks
pre-commit:
pre-commit install --hook-type pre-commit
# Deactivate pre-commit hooks
unpre-commit:
pre-commit uninstall --hook-type pre-commit
# Run pre-commit hooks
pre-commit-run:
@pre-commit run --all-files --hook-stage manual


@ -65,7 +65,7 @@ func (c *Client) APEManagerListChains(ctx context.Context, prm PrmAPEManagerList
var res ResAPEManagerListChains
res.st, err = c.processResponse(resp)
if err != nil || !apistatus.IsSuccessful(res.st) {
return &res, err
return nil, err
}
for _, ch := range resp.GetBody().GetChains() {


@ -11,8 +11,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Client represents virtual connection to the FrostFS network to communicate
@ -100,28 +98,13 @@ func (c *Client) Dial(ctx context.Context, prm PrmDial) error {
c.setFrostFSAPIServer((*coreServer)(&c.c))
ctx, cancel := context.WithTimeout(ctx, prm.DialTimeout)
defer cancel()
// TODO: (neofs-api-go#382) perform generic dial stage of the client.Client
_, err := rpc.Balance(&c.c, new(v2accounting.BalanceRequest),
client.WithContext(ctx),
)
if err != nil {
var ctxErr error
// return context errors since they signal about dial problem
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
ctxErr = err
} else if st, ok := status.FromError(err); ok && st.Code() == codes.Canceled {
ctxErr = context.Canceled
} else if ok && st.Code() == codes.DeadlineExceeded {
ctxErr = context.DeadlineExceeded
}
if ctxErr != nil {
if conn := c.c.Conn(); conn != nil {
_ = conn.Close()
}
return ctxErr
}
// return context errors since they signal about dial problem
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return err
}
return nil
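The hunk above trades one way of spotting a failed dial for another: translating gRPC status codes (codes.Canceled, codes.DeadlineExceeded) into context errors versus matching the context errors directly with errors.Is. A standalone sketch of both checks; the helper name is illustrative and not part of the SDK:

package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// dialCtxError reports whether err signals a cancelled or timed-out dial,
// checking plain context errors first and the gRPC status code second.
func dialCtxError(err error) bool {
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return true
	}
	if st, ok := status.FromError(err); ok {
		return st.Code() == codes.Canceled || st.Code() == codes.DeadlineExceeded
	}
	return false
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(dialCtxError(ctx.Err())) // true: a cancelled context maps to context.Canceled
}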


@ -239,8 +239,12 @@ func (c *Client) NetMapSnapshot(ctx context.Context, _ PrmNetMapSnapshot) (*ResN
var res ResNetMapSnapshot
res.st, err = c.processResponse(resp)
if err != nil || !apistatus.IsSuccessful(res.st) {
return &res, err
if err != nil {
return nil, err
}
if !apistatus.IsSuccessful(res.st) {
return &res, nil
}
const fieldNetMap = "network map"
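Several hunks in this compare apply the same reshaping of the check after processResponse: one side folds transport errors and unsuccessful API statuses into a single condition, the other splits them so a transport failure returns a nil result and a received-but-unsuccessful status returns the result with a nil error. A caller-side sketch of that contract; the Status() accessor on the result and the import paths are assumptions of this sketch:

package sketch

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)

// checkSnapshot separates transport failures (plain error, result may be nil)
// from rejected requests (result carrying an unsuccessful status, nil error).
func checkSnapshot(ctx context.Context, cli *client.Client) error {
	res, err := cli.NetMapSnapshot(ctx, client.PrmNetMapSnapshot{})
	if err != nil {
		return fmt.Errorf("snapshot RPC: %w", err)
	}
	if !apistatus.IsSuccessful(res.Status()) {
		return fmt.Errorf("snapshot rejected with status %v", res.Status())
	}
	return nil
}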


@ -148,8 +148,12 @@ func (c *Client) ObjectDelete(ctx context.Context, prm PrmObjectDelete) (*ResObj
var res ResObjectDelete
res.st, err = c.processResponse(resp)
if err != nil || !apistatus.IsSuccessful(res.st) {
return &res, err
if err != nil {
return nil, err
}
if !apistatus.IsSuccessful(res.st) {
return &res, nil
}
const fieldTombstone = "tombstone"


@ -492,8 +492,12 @@ func (c *Client) ObjectHead(ctx context.Context, prm PrmObjectHead) (*ResObjectH
var res ResObjectHead
res.st, err = c.processResponse(resp)
if err != nil || !apistatus.IsSuccessful(res.st) {
return &res, err
if err != nil {
return nil, err
}
if !apistatus.IsSuccessful(res.st) {
return &res, nil
}
res.idObj = *prm.ObjectID


@ -189,8 +189,12 @@ func (c *Client) ObjectHash(ctx context.Context, prm PrmObjectHash) (*ResObjectH
var res ResObjectHash
res.st, err = c.processResponse(resp)
if err != nil || !apistatus.IsSuccessful(res.st) {
return &res, err
if err != nil {
return nil, err
}
if !apistatus.IsSuccessful(res.st) {
return &res, nil
}
res.checksums = resp.GetBody().GetHashList()


@ -106,6 +106,7 @@ func (c *Client) ObjectPatchInit(ctx context.Context, prm PrmObjectPatch) (Objec
}
objectPatcher.client = c
objectPatcher.stream = stream
objectPatcher.firstPatchPayload = true
if prm.MaxChunkLength > 0 {
objectPatcher.maxChunkLen = prm.MaxChunkLength
@ -153,6 +154,8 @@ type objectPatcher struct {
respV2 v2object.PatchResponse
maxChunkLen int
firstPatchPayload bool
}
func (x *objectPatcher) PatchAttributes(_ context.Context, newAttrs []object.Attribute, replace bool) bool {
@ -168,33 +171,19 @@ func (x *objectPatcher) PatchPayload(_ context.Context, rng *object.Range, paylo
buf := make([]byte, x.maxChunkLen)
for patchIter := 0; ; patchIter++ {
for {
n, err := payloadReader.Read(buf)
if err != nil && err != io.EOF {
x.err = fmt.Errorf("read payload: %w", err)
return false
}
if n == 0 {
if patchIter == 0 {
if rng.GetLength() == 0 {
x.err = errors.New("zero-length empty payload patch can't be applied")
return false
}
if !x.patch(&object.Patch{
Address: x.addr,
PayloadPatch: &object.PayloadPatch{
Range: rng,
Chunk: []byte{},
},
}) {
return false
}
}
break
}
rngPart := object.NewRange()
if patchIter == 0 {
if x.firstPatchPayload {
x.firstPatchPayload = false
rngPart.SetOffset(offset)
rngPart.SetLength(rng.GetLength())
} else {
@ -246,8 +235,12 @@ func (x *objectPatcher) Close(_ context.Context) (*ResObjectPatch, error) {
}
x.res.st, x.err = x.client.processResponse(&x.respV2)
if x.err != nil || !apistatus.IsSuccessful(x.res.st) {
return &x.res, x.err
if x.err != nil {
return nil, x.err
}
if !apistatus.IsSuccessful(x.res.st) {
return &x.res, nil
}
const fieldID = "ID"


@ -170,11 +170,12 @@ func TestObjectPatcher(t *testing.T) {
pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
patcher := objectPatcher{
client: &Client{},
stream: m,
addr: oidtest.Address(),
key: pk,
maxChunkLen: test.maxChunkLen,
client: &Client{},
stream: m,
addr: oidtest.Address(),
key: pk,
maxChunkLen: test.maxChunkLen,
firstPatchPayload: true,
}
success := patcher.PatchAttributes(context.Background(), nil, false)
@ -193,93 +194,6 @@ func TestObjectPatcher(t *testing.T) {
}
}
func TestRepeatPayloadPatch(t *testing.T) {
t.Run("no payload patch partioning", func(t *testing.T) {
m := &mockPatchStream{}
pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
const maxChunkLen = 20
patcher := objectPatcher{
client: &Client{},
stream: m,
addr: oidtest.Address(),
key: pk,
maxChunkLen: maxChunkLen,
}
for _, pp := range []struct {
patchPayload string
rng *object.Range
}{
{
patchPayload: "xxxxxxxxxx",
rng: newRange(1, 6),
},
{
patchPayload: "yyyyyyyyyy",
rng: newRange(5, 9),
},
{
patchPayload: "zzzzzzzzzz",
rng: newRange(10, 0),
},
} {
success := patcher.PatchPayload(context.Background(), pp.rng, bytes.NewReader([]byte(pp.patchPayload)))
require.True(t, success)
}
requireRangeChunk(t, m.streamedPayloadPatches[0], 1, 6, "xxxxxxxxxx")
requireRangeChunk(t, m.streamedPayloadPatches[1], 5, 9, "yyyyyyyyyy")
requireRangeChunk(t, m.streamedPayloadPatches[2], 10, 0, "zzzzzzzzzz")
})
t.Run("payload patch partioning", func(t *testing.T) {
m := &mockPatchStream{}
pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
const maxChunkLen = 5
patcher := objectPatcher{
client: &Client{},
stream: m,
addr: oidtest.Address(),
key: pk,
maxChunkLen: maxChunkLen,
}
for _, pp := range []struct {
patchPayload string
rng *object.Range
}{
{
patchPayload: "xxxxxxxxxx",
rng: newRange(1, 6),
},
{
patchPayload: "yyyyyyyyyy",
rng: newRange(5, 9),
},
{
patchPayload: "zzzzzzzzzz",
rng: newRange(10, 0),
},
} {
success := patcher.PatchPayload(context.Background(), pp.rng, bytes.NewReader([]byte(pp.patchPayload)))
require.True(t, success)
}
requireRangeChunk(t, m.streamedPayloadPatches[0], 1, 6, "xxxxx")
requireRangeChunk(t, m.streamedPayloadPatches[1], 7, 0, "xxxxx")
requireRangeChunk(t, m.streamedPayloadPatches[2], 5, 9, "yyyyy")
requireRangeChunk(t, m.streamedPayloadPatches[3], 14, 0, "yyyyy")
requireRangeChunk(t, m.streamedPayloadPatches[4], 10, 0, "zzzzz")
requireRangeChunk(t, m.streamedPayloadPatches[5], 10, 0, "zzzzz")
})
}
func requireRangeChunk(t *testing.T, pp *object.PayloadPatch, offset, length int, chunk string) {
require.NotNil(t, pp)
require.Equal(t, uint64(offset), pp.Range.GetOffset())


@ -156,8 +156,12 @@ func (x *objectWriterRaw) Close(_ context.Context) (*ResObjectPut, error) {
}
x.res.st, x.err = x.client.processResponse(&x.respV2)
if x.err != nil || !apistatus.IsSuccessful(x.res.st) {
return &x.res, x.err
if x.err != nil {
return nil, x.err
}
if !apistatus.IsSuccessful(x.res.st) {
return &x.res, nil
}
const fieldID = "ID"


@ -167,7 +167,7 @@ func (c *Client) ObjectPutSingle(ctx context.Context, prm PrmObjectPutSingle) (*
var res ResObjectPutSingle
res.st, err = c.processResponse(resp)
if err != nil {
return &res, err
return nil, err
}
res.epoch = resp.GetMetaHeader().GetEpoch()


@ -220,7 +220,7 @@ func (x *ObjectListReader) Close() (*ResObjectSearch, error) {
defer x.cancelCtxStream()
if x.err != nil && !errors.Is(x.err, io.EOF) {
return &x.res, x.err
return nil, x.err
}
return &x.res, nil


@ -238,61 +238,3 @@ func (x *NodeUnderMaintenance) SetMessage(v string) {
func (x NodeUnderMaintenance) Message() string {
return x.v2.Message()
}
// InvalidArgument describes failure status related to invalid argument.
// Instances provide Status and StatusV2 interfaces.
type InvalidArgument struct {
v2 status.Status
}
const defaultInvalidArgumentMsg = "argument is invalid"
// Error implements the error interface.
func (x *InvalidArgument) Error() string {
msg := x.v2.Message()
if msg == "" {
msg = defaultInvalidArgumentMsg
}
return errMessageStatusV2(
globalizeCodeV2(status.InvalidArgument, status.GlobalizeCommonFail),
msg,
)
}
// implements local interface defined in FromStatusV2 func.
func (x *InvalidArgument) fromStatusV2(st *status.Status) {
x.v2 = *st
}
// ToStatusV2 implements StatusV2 interface method.
// If the value was returned by FromStatusV2, returns the source message.
// Otherwise, returns message with
// - code: INVALID_ARGUMENT;
// - string message: written message via SetMessage or
// "argument is invalid" as a default message;
// - details: empty.
func (x InvalidArgument) ToStatusV2() *status.Status {
x.v2.SetCode(globalizeCodeV2(status.InvalidArgument, status.GlobalizeCommonFail))
if x.v2.Message() == "" {
x.v2.SetMessage(defaultInvalidArgumentMsg)
}
return &x.v2
}
// SetMessage writes invalid argument failure message.
// Message should be used for debug purposes only.
//
// See also Message.
func (x *InvalidArgument) SetMessage(v string) {
x.v2.SetMessage(v)
}
// Message returns status message. Zero status returns empty message.
// Message should be used for debug purposes only.
//
// See also SetMessage.
func (x InvalidArgument) Message() string {
return x.v2.Message()
}


@ -114,7 +114,7 @@ func TestNodeUnderMaintenance(t *testing.T) {
stV2 := st.ToStatusV2()
require.Equal(t, "node is under maintenance", stV2.Message())
require.Empty(t, "", stV2.Message())
})
t.Run("non-empty to V2", func(t *testing.T) {
@ -128,42 +128,3 @@ func TestNodeUnderMaintenance(t *testing.T) {
require.Equal(t, msg, stV2.Message())
})
}
func TestInvalidArgument(t *testing.T) {
t.Run("default", func(t *testing.T) {
var st apistatus.InvalidArgument
require.Empty(t, st.Message())
})
t.Run("custom message", func(t *testing.T) {
var st apistatus.InvalidArgument
msg := "some message"
st.SetMessage(msg)
stV2 := st.ToStatusV2()
require.Equal(t, msg, st.Message())
require.Equal(t, msg, stV2.Message())
})
t.Run("empty to V2", func(t *testing.T) {
var st apistatus.InvalidArgument
stV2 := st.ToStatusV2()
require.Equal(t, "argument is invalid", stV2.Message())
})
t.Run("non-empty to V2", func(t *testing.T) {
var st apistatus.InvalidArgument
msg := "some other msg"
st.SetMessage(msg)
stV2 := st.ToStatusV2()
require.Equal(t, msg, stV2.Message())
})
}


@ -33,29 +33,12 @@ type StatusV2 interface {
//
// Common failures:
// - status.Internal: *ServerInternal;
// - status.WrongMagicNumber: *WrongMagicNumber;
// - status.SignatureVerificationFail: *SignatureVerification;
// - status.NodeUnderMaintenance: *NodeUnderMaintenance;
// - status.InvalidArgument: *InvalidArgument.
// - status.SignatureVerificationFail: *SignatureVerification.
//
// Object failures:
// - object.StatusLocked: *ObjectLocked;
// - object.StatusLockNonRegularObject: *LockNonRegularObject;
// - object.StatusAccessDenied: *ObjectAccessDenied;
// - object.StatusNotFound: *ObjectNotFound;
// - object.StatusAlreadyRemoved: *ObjectAlreadyRemoved;
// - object.StatusOutOfRange: *ObjectOutOfRange.
//
// Container failures:
// - container.StatusNotFound: *ContainerNotFound;
// - container.StatusEACLNotFound: *EACLNotFound.
//
// Session failures:
// - session.StatusTokenNotFound: *SessionTokenNotFound;
// - session.StatusTokenExpired: *SessionTokenExpired.
//
// APE Manager failures
// - apemanager.StatusAPEManagerAccessDenied: *APEManagerAccessDenied.
// - object.StatusLockNonRegularObject: *LockNonRegularObject.
// - object.StatusAccessDenied: *ObjectAccessDenied.
func FromStatusV2(st *status.Status) Status {
var decoder interface {
fromStatusV2(*status.Status)
@ -78,8 +61,6 @@ func FromStatusV2(st *status.Status) Status {
decoder = new(SignatureVerification)
case status.NodeUnderMaintenance:
decoder = new(NodeUnderMaintenance)
case status.InvalidArgument:
decoder = new(InvalidArgument)
}
case object.LocalizeFailStatus(&code):
switch code {
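The doc comment above enumerates which concrete type FromStatusV2 yields per status code; the two sides of this compare disagree on how long that list is. A small sketch of branching on the decoded status, using only types named in this diff (import paths are assumed):

package sketch

import (
	status "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/status"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)

// classify decodes a wire-level status and branches on the concrete type.
func classify(raw *status.Status) string {
	switch apistatus.FromStatusV2(raw).(type) {
	case *apistatus.ServerInternal:
		return "common server-side failure"
	case *apistatus.NodeUnderMaintenance:
		return "node temporarily unavailable"
	case *apistatus.ObjectAccessDenied:
		return "object access denied"
	default:
		return "unsupported or unrecognized status"
	}
}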


@ -61,24 +61,6 @@ func TestToStatusV2(t *testing.T) {
}),
codeV2: 1025,
},
{
status: (statusConstructor)(func() apistatus.Status {
return new(apistatus.SignatureVerification)
}),
codeV2: 1026,
},
{
status: (statusConstructor)(func() apistatus.Status {
return new(apistatus.NodeUnderMaintenance)
}),
codeV2: 1027,
},
{
status: (statusConstructor)(func() apistatus.Status {
return new(apistatus.InvalidArgument)
}),
codeV2: 1028,
},
{
status: (statusConstructor)(func() apistatus.Status {
return new(apistatus.ObjectLocked)
@ -149,6 +131,12 @@ func TestToStatusV2(t *testing.T) {
}),
codeV2: 5120,
},
{
status: (statusConstructor)(func() apistatus.Status {
return new(apistatus.NodeUnderMaintenance)
}),
codeV2: 1027,
},
} {
var st apistatus.Status


@ -308,7 +308,7 @@ func (x *Container) SetAttribute(key, value string) {
attrs := x.v2.GetAttributes()
ln := len(attrs)
for i := range ln {
for i := 0; i < ln; i++ {
if attrs[i].GetKey() == key {
attrs[i].SetValue(value)
return
@ -356,7 +356,8 @@ func (x Container) IterateUserAttributes(f func(key, val string)) {
attrs := x.v2.GetAttributes()
for _, attr := range attrs {
key := attr.GetKey()
if !strings.HasPrefix(key, container.SysAttributePrefix) {
if !strings.HasPrefix(key, container.SysAttributePrefix) &&
!strings.HasPrefix(key, container.SysAttributePrefixNeoFS) {
f(key, attr.GetValue())
}
}
@ -416,7 +417,8 @@ func DisableHomomorphicHashing(cnr *Container) {
//
// Zero Container has enabled hashing.
func IsHomomorphicHashingDisabled(cnr Container) bool {
return cnr.Attribute(container.SysAttributeHomomorphicHashing) == attributeHomoHashEnabled
return cnr.Attribute(container.SysAttributeHomomorphicHashing) == attributeHomoHashEnabled ||
cnr.Attribute(container.SysAttributeHomomorphicHashingNeoFS) == attributeHomoHashEnabled
}
// Domain represents information about container domain registered in the NNS
@ -465,6 +467,9 @@ func ReadDomain(cnr Container) (res Domain) {
if name := cnr.Attribute(container.SysAttributeName); name != "" {
res.SetName(name)
res.SetZone(cnr.Attribute(container.SysAttributeZone))
} else if name = cnr.Attribute(container.SysAttributeNameNeoFS); name != "" {
res.SetName(name)
res.SetZone(cnr.Attribute(container.SysAttributeZoneNeoFS))
}
return


@ -150,7 +150,7 @@ func assertContainsAttribute(t *testing.T, m v2container.Container, key, val str
}
func TestContainer_Attribute(t *testing.T) {
const attrKey1, attrKey2 = v2container.SysAttributePrefix + "key1", v2container.SysAttributePrefix + "key2"
const attrKey1, attrKey2 = v2container.SysAttributePrefix + "key1", v2container.SysAttributePrefixNeoFS + "key2"
const attrVal1, attrVal2 = "val1", "val2"
val := containertest.Container()


@ -286,13 +286,13 @@ func equalRecords(r1, r2 Record) bool {
return false
}
for i := range len(fs1) {
for i := 0; i < len(fs1); i++ {
if !equalFilters(fs1[i], fs2[i]) {
return false
}
}
for i := range len(ts1) {
for i := 0; i < len(ts1); i++ {
if !equalTargets(ts1[i], ts2[i]) {
return false
}


@ -212,7 +212,7 @@ func EqualTables(t1, t2 Table) bool {
return false
}
for i := range len(rs1) {
for i := 0; i < len(rs1); i++ {
if !equalRecords(rs1[i], rs2[i]) {
return false
}


@ -51,7 +51,7 @@ func SetTargetECDSAKeys(t *Target, pubs ...*ecdsa.PublicKey) {
binKeys = make([][]byte, 0, ln)
}
for i := range ln {
for i := 0; i < ln; i++ {
binKeys = append(binKeys, (*keys.PublicKey)(pubs[i]).Bytes())
}
@ -67,7 +67,7 @@ func TargetECDSAKeys(t *Target) []*ecdsa.PublicKey {
pubs := make([]*ecdsa.PublicKey, ln)
for i := range ln {
for i := 0; i < ln; i++ {
p := new(keys.PublicKey)
if p.DecodeBytes(binKeys[i]) == nil {
pubs[i] = (*ecdsa.PublicKey)(p)
@ -169,7 +169,7 @@ func equalTargets(t1, t2 Target) bool {
return false
}
for i := range len(keys1) {
for i := 0; i < len(keys1); i++ {
if !bytes.Equal(keys1[i], keys2[i]) {
return false
}


@ -19,7 +19,7 @@ func baseBenchmarkTableBinaryComparison(b *testing.B, factor int) {
b.StopTimer()
b.ResetTimer()
b.StartTimer()
for range b.N {
for i := 0; i < b.N; i++ {
got, _ := t.Marshal()
if !bytes.Equal(exp, got) {
b.Fail()
@ -38,7 +38,7 @@ func baseBenchmarkTableEqualsComparison(b *testing.B, factor int) {
b.StopTimer()
b.ResetTimer()
b.StartTimer()
for range b.N {
for i := 0; i < b.N; i++ {
if !eacl.EqualTables(*t, *t2) {
b.Fail()
}
@ -76,7 +76,7 @@ func TargetN(n int) *eacl.Target {
x.SetRole(eacl.RoleSystem)
keys := make([][]byte, n)
for i := range n {
for i := 0; i < n; i++ {
keys[i] = make([]byte, 32)
rand.Read(keys[i])
}
@ -94,7 +94,7 @@ func RecordN(n int) *eacl.Record {
x.SetOperation(eacl.OperationRangeHash)
x.SetTargets(*TargetN(n))
for range n {
for i := 0; i < n; i++ {
x.AddFilter(eacl.HeaderFromObject, eacl.MatchStringEqual, "", cidtest.ID().EncodeToString())
}
@ -106,7 +106,7 @@ func TableN(n int) *eacl.Table {
x.SetCID(cidtest.ID())
for range n {
for i := 0; i < n; i++ {
x.AddRecord(RecordN(n))
}
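Many of the test and benchmark hunks above only toggle the loop style: the classic three-clause form versus the range-over-int form introduced in Go 1.22 (the module declares go 1.22 in go.mod below). The two spellings are equivalent; a compact illustration:

package main

import "fmt"

func main() {
	n := 3
	for i := 0; i < n; i++ { // classic three-clause loop
		fmt.Println("classic", i)
	}
	for i := range n { // Go 1.22+: ranging over an int yields 0, 1, ..., n-1
		fmt.Println("range", i)
	}
	for range n { // the index can be dropped entirely, as in `for range b.N`
		fmt.Println("tick")
	}
}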

go.mod: 22 lines changed

@ -3,11 +3,11 @@ module git.frostfs.info/TrueCloudLab/frostfs-sdk-go
go 1.22
require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241011114054-f0fc40e116d1
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
github.com/antlr4-go/antlr/v4 v4.13.1
github.com/antlr4-go/antlr/v4 v4.13.0
github.com/google/uuid v1.6.0
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/klauspost/reedsolomon v1.12.1
@ -16,8 +16,8 @@ require (
github.com/nspcc-dev/neo-go v0.106.2
github.com/stretchr/testify v1.9.0
go.uber.org/zap v1.27.0
google.golang.org/grpc v1.66.2
google.golang.org/protobuf v1.34.1
google.golang.org/grpc v1.63.2
google.golang.org/protobuf v1.33.0
gopkg.in/yaml.v3 v3.0.1
)
@ -49,11 +49,11 @@ require (
github.com/twmb/murmur3 v1.1.8 // indirect
go.etcd.io/bbolt v1.3.9 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
)

go.sum: 60 lines changed

@ -1,5 +1,5 @@
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241011114054-f0fc40e116d1 h1:ivcdxQeQDnx4srF2ezoaeVlF0FAycSAztwfIUJnUI4s=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241011114054-f0fc40e116d1/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb h1:p9ByDsw+H6p6LyYSx8LKFtAG/oPKQpDVMNfjPqdevTw=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@ -12,14 +12,14 @@ git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjq
git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3GYvaX1a8GQZQHvlF8=
github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c=
github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.2-0.20231013160410-1f65e75b6dfb h1:f0BMgIjhZy4lSRHCXFbQst85f5agZAjtDMixQqBWNpc=
@ -148,21 +148,21 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -174,33 +174,33 @@ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@ -47,7 +47,7 @@ func BenchmarkNetmap_ContainerNodes(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for range b.N {
for i := 0; i < b.N; i++ {
_, err := nm.ContainerNodes(p, pivot)
if err != nil {
b.Fatal(err)


@ -460,10 +460,6 @@ func (x *NodeInfo) SortAttributes() {
// SetOffline sets the state of the node to "offline". When a node updates
// information about itself in the network map, this action is interpreted as
// an intention to leave the network.
//
// See also IsOffline.
//
// Deprecated: use SetStatus instead.
func (x *NodeInfo) SetOffline() {
x.m.SetState(netmap.Offline)
}
@ -474,8 +470,6 @@ func (x *NodeInfo) SetOffline() {
// mean online).
//
// See also SetOffline.
//
// Deprecated: use Status instead.
func (x NodeInfo) IsOffline() bool {
return x.m.GetState() == netmap.Offline
}
@ -485,8 +479,6 @@ func (x NodeInfo) IsOffline() bool {
// action is interpreted as an intention to enter the network.
//
// See also IsOnline.
//
// Deprecated: use SetStatus instead.
func (x *NodeInfo) SetOnline() {
x.m.SetState(netmap.Online)
}
@ -497,8 +489,6 @@ func (x *NodeInfo) SetOnline() {
// mean offline).
//
// See also SetOnline.
//
// Deprecated: use Status instead.
func (x NodeInfo) IsOnline() bool {
return x.m.GetState() == netmap.Online
}
@ -508,8 +498,6 @@ func (x NodeInfo) IsOnline() bool {
// state declares temporal unavailability for a node.
//
// See also IsMaintenance.
//
// Deprecated: use SetStatus instead.
func (x *NodeInfo) SetMaintenance() {
x.m.SetState(netmap.Maintenance)
}
@ -519,63 +507,6 @@ func (x *NodeInfo) SetMaintenance() {
// Zero NodeInfo has undefined state.
//
// See also SetMaintenance.
//
// Deprecated: use Status instead.
func (x NodeInfo) IsMaintenance() bool {
return x.m.GetState() == netmap.Maintenance
}
type NodeState netmap.NodeState
const (
UnspecifiedState = NodeState(netmap.UnspecifiedState)
Online = NodeState(netmap.Online)
Offline = NodeState(netmap.Offline)
Maintenance = NodeState(netmap.Maintenance)
)
// ToV2 converts NodeState to v2.
func (ns NodeState) ToV2() netmap.NodeState {
return netmap.NodeState(ns)
}
// FromV2 reads NodeState to v2.
func (ns *NodeState) FromV2(state netmap.NodeState) {
*ns = NodeState(state)
}
// Status returns the current state of the node in the network map.
//
// Zero NodeInfo has an undefined state, neither online nor offline.
func (x NodeInfo) Status() NodeState {
return NodeState(x.m.GetState())
}
// SetState updates the state of the node in the network map.
//
// The state determines the node's current status within the network:
// - "online": Indicates the node intends to enter the network.
// - "offline": Indicates the node intends to leave the network.
// - "maintenance": Indicates the node is temporarily unavailable.
//
// See also Status.
func (x *NodeInfo) SetStatus(state NodeState) {
x.m.SetState(netmap.NodeState(state))
}
// String implements fmt.Stringer.
//
// String is designed to be human-readable, and its format MAY differ between
// SDK versions.
func (ns NodeState) String() string {
return netmap.NodeState(ns).String()
}
// IsOnline checks if the current state is "online".
func (ns NodeState) IsOnline() bool { return ns == Online }
// IsOffline checks if the current state is "offline".
func (ns NodeState) IsOffline() bool { return ns == Offline }
// IsMaintenance checks if the current state is "maintenance".
func (ns NodeState) IsMaintenance() bool { return ns == Maintenance }
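One side of this compare carries the NodeState type with the Status/SetStatus accessors shown above; the other keeps only the SetOnline/IsOnline-style helpers. A small usage sketch of the status-based API; the import path and helper name are assumptions for illustration:

package sketch

import sdknetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"

// markOnline flips a node to the "online" state and double-checks it through
// the read-side accessor.
func markOnline(n *sdknetmap.NodeInfo) {
	n.SetStatus(sdknetmap.Online)
	if !n.Status().IsOnline() {
		panic("unreachable: the status was just set to online")
	}
}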


@ -3,7 +3,6 @@ package netmap
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"github.com/stretchr/testify/require"
)
@ -24,75 +23,27 @@ func TestNodeInfo_SetAttribute(t *testing.T) {
require.Equal(t, val, n.Attribute(key))
}
func TestNodeState(t *testing.T) {
m := map[NodeState]netmap.NodeState{
UnspecifiedState: netmap.UnspecifiedState,
Online: netmap.Online,
Offline: netmap.Offline,
Maintenance: netmap.Maintenance,
}
t.Run("from sdk to v2", func(t *testing.T) {
for stateSDK, stateV2 := range m {
require.Equal(t, stateV2, stateSDK.ToV2())
}
})
t.Run("from v2 to sdk", func(t *testing.T) {
for stateSDK, stateV2 := range m {
var state NodeState
state.FromV2(stateV2)
require.Equal(t, stateSDK, state)
}
})
}
func TestNodeInfo_Status(t *testing.T) {
t.Run("deprecated getters/setters", func(t *testing.T) {
var n NodeInfo
var n NodeInfo
require.False(t, n.IsOnline())
require.False(t, n.IsOffline())
require.False(t, n.IsMaintenance())
require.False(t, n.IsOnline())
require.False(t, n.IsOffline())
require.False(t, n.IsMaintenance())
n.SetOnline()
require.True(t, n.IsOnline())
require.False(t, n.IsOffline())
require.False(t, n.IsMaintenance())
n.SetOnline()
require.True(t, n.IsOnline())
require.False(t, n.IsOffline())
require.False(t, n.IsMaintenance())
n.SetOffline()
require.True(t, n.IsOffline())
require.False(t, n.IsOnline())
require.False(t, n.IsMaintenance())
n.SetOffline()
require.True(t, n.IsOffline())
require.False(t, n.IsOnline())
require.False(t, n.IsMaintenance())
n.SetMaintenance()
require.True(t, n.IsMaintenance())
require.False(t, n.IsOnline())
require.False(t, n.IsOffline())
})
t.Run("brand new getters/setters", func(t *testing.T) {
var n NodeInfo
require.False(t, n.Status().IsOnline())
require.False(t, n.Status().IsOffline())
require.False(t, n.Status().IsMaintenance())
n.SetStatus(Online)
require.True(t, n.Status().IsOnline())
require.False(t, n.Status().IsOffline())
require.False(t, n.Status().IsMaintenance())
n.SetStatus(Offline)
require.False(t, n.Status().IsOnline())
require.True(t, n.Status().IsOffline())
require.False(t, n.Status().IsMaintenance())
n.SetStatus(Maintenance)
require.False(t, n.Status().IsOnline())
require.False(t, n.Status().IsOffline())
require.True(t, n.Status().IsMaintenance())
})
n.SetMaintenance()
require.True(t, n.IsMaintenance())
require.False(t, n.IsOnline())
require.False(t, n.IsOffline())
}
func TestNodeInfo_ExternalAddr(t *testing.T) {


@ -19,10 +19,10 @@ repStmt:
cbfStmt: CBF BackupFactor = NUMBER1; // container backup factor
selectStmt:
SELECT Count = NUMBER1 // number of nodes to select without container backup factor *)
(IN clause? Bucket = filterKey)? // bucket name
FROM Filter = identWC // filter reference or whole netmap
(AS Name = ident)? // optional selector name
SELECT Count = NUMBER1 // number of nodes to select without container backup factor *)
(IN clause? Bucket = ident)? // bucket name
FROM Filter = identWC // filter reference or whole netmap
(AS Name = ident)? // optional selector name
;
clause: CLAUSE_SAME | CLAUSE_DISTINCT; // nodes from distinct buckets

File diff suppressed because one or more lines are too long


@ -1,5 +1,4 @@
package parser
// You can download ANTLR from https://www.antlr.org/download/antlr-4.13.1-complete.jar,
// then run generate or simply run the dedicated Makefile target like this `make policy`.
//go:generate java -Xmx500M -cp "./antlr-4.13.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Go -no-listener -visitor QueryLexer.g4 Query.g4
// ANTLR can be downloaded from https://www.antlr.org/download/antlr-4.13.0-complete.jar
//go:generate java -Xmx500M -cp "./antlr-4.13.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool -Dlanguage=Go -no-listener -visitor QueryLexer.g4 Query.g4


@ -1,4 +1,4 @@
// Code generated from netmap/parser/Query.g4 by ANTLR 4.13.1. DO NOT EDIT.
// Code generated from /repo/frostfs/sdk-go/netmap/parser/Query.g4 by ANTLR 4.13.0. DO NOT EDIT.
package parser // Query


@ -1,4 +1,4 @@
// Code generated from netmap/parser/QueryLexer.g4 by ANTLR 4.13.1. DO NOT EDIT.
// Code generated from /repo/frostfs/sdk-go/netmap/parser/QueryLexer.g4 by ANTLR 4.13.0. DO NOT EDIT.
package parser


@ -1,4 +1,4 @@
// Code generated from netmap/parser/Query.g4 by ANTLR 4.13.1. DO NOT EDIT.
// Code generated from /repo/frostfs/sdk-go/netmap/parser/Query.g4 by ANTLR 4.13.0. DO NOT EDIT.
package parser // Query
@ -93,7 +93,7 @@ func queryParserInit() {
85, 1, 0, 0, 0, 85, 7, 1, 0, 0, 0, 86, 87, 5, 10, 0, 0, 87, 88, 5, 22,
0, 0, 88, 9, 1, 0, 0, 0, 89, 90, 5, 11, 0, 0, 90, 96, 5, 22, 0, 0, 91,
93, 5, 8, 0, 0, 92, 94, 3, 12, 6, 0, 93, 92, 1, 0, 0, 0, 93, 94, 1, 0,
0, 0, 94, 95, 1, 0, 0, 0, 95, 97, 3, 20, 10, 0, 96, 91, 1, 0, 0, 0, 96,
0, 0, 94, 95, 1, 0, 0, 0, 95, 97, 3, 28, 14, 0, 96, 91, 1, 0, 0, 0, 96,
97, 1, 0, 0, 0, 97, 98, 1, 0, 0, 0, 98, 99, 5, 12, 0, 0, 99, 102, 3, 30,
15, 0, 100, 101, 5, 9, 0, 0, 101, 103, 3, 28, 14, 0, 102, 100, 1, 0, 0,
0, 102, 103, 1, 0, 0, 0, 103, 11, 1, 0, 0, 0, 104, 105, 7, 0, 0, 0, 105,
@ -1364,7 +1364,7 @@ type ISelectStmtContext interface {
SetCount(antlr.Token)
// GetBucket returns the Bucket rule contexts.
GetBucket() IFilterKeyContext
GetBucket() IIdentContext
// GetFilter returns the Filter rule contexts.
GetFilter() IIdentWCContext
@ -1373,7 +1373,7 @@ type ISelectStmtContext interface {
GetName() IIdentContext
// SetBucket sets the Bucket rule contexts.
SetBucket(IFilterKeyContext)
SetBucket(IIdentContext)
// SetFilter sets the Filter rule contexts.
SetFilter(IIdentWCContext)
@ -1388,8 +1388,8 @@ type ISelectStmtContext interface {
IdentWC() IIdentWCContext
IN() antlr.TerminalNode
AS() antlr.TerminalNode
FilterKey() IFilterKeyContext
Ident() IIdentContext
AllIdent() []IIdentContext
Ident(i int) IIdentContext
Clause() IClauseContext
// IsSelectStmtContext differentiates from other interfaces.
@ -1400,7 +1400,7 @@ type SelectStmtContext struct {
antlr.BaseParserRuleContext
parser antlr.Parser
Count antlr.Token
Bucket IFilterKeyContext
Bucket IIdentContext
Filter IIdentWCContext
Name IIdentContext
}
@ -1436,13 +1436,13 @@ func (s *SelectStmtContext) GetCount() antlr.Token { return s.Count }
func (s *SelectStmtContext) SetCount(v antlr.Token) { s.Count = v }
func (s *SelectStmtContext) GetBucket() IFilterKeyContext { return s.Bucket }
func (s *SelectStmtContext) GetBucket() IIdentContext { return s.Bucket }
func (s *SelectStmtContext) GetFilter() IIdentWCContext { return s.Filter }
func (s *SelectStmtContext) GetName() IIdentContext { return s.Name }
func (s *SelectStmtContext) SetBucket(v IFilterKeyContext) { s.Bucket = v }
func (s *SelectStmtContext) SetBucket(v IIdentContext) { s.Bucket = v }
func (s *SelectStmtContext) SetFilter(v IIdentWCContext) { s.Filter = v }
@ -1484,28 +1484,37 @@ func (s *SelectStmtContext) AS() antlr.TerminalNode {
return s.GetToken(QueryAS, 0)
}
func (s *SelectStmtContext) FilterKey() IFilterKeyContext {
var t antlr.RuleContext
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IFilterKeyContext); ok {
t = ctx.(antlr.RuleContext)
break
func (s *SelectStmtContext) AllIdent() []IIdentContext {
children := s.GetChildren()
len := 0
for _, ctx := range children {
if _, ok := ctx.(IIdentContext); ok {
len++
}
}
if t == nil {
return nil
tst := make([]IIdentContext, len)
i := 0
for _, ctx := range children {
if t, ok := ctx.(IIdentContext); ok {
tst[i] = t.(IIdentContext)
i++
}
}
return t.(IFilterKeyContext)
return tst
}
func (s *SelectStmtContext) Ident() IIdentContext {
func (s *SelectStmtContext) Ident(i int) IIdentContext {
var t antlr.RuleContext
j := 0
for _, ctx := range s.GetChildren() {
if _, ok := ctx.(IIdentContext); ok {
t = ctx.(antlr.RuleContext)
break
if j == i {
t = ctx.(antlr.RuleContext)
break
}
j++
}
}
@ -1608,7 +1617,7 @@ func (p *Query) SelectStmt() (localctx ISelectStmtContext) {
{
p.SetState(95)
var _x = p.FilterKey()
var _x = p.Ident()
localctx.(*SelectStmtContext).Bucket = _x
}


@ -1,4 +1,4 @@
// Code generated from netmap/parser/Query.g4 by ANTLR 4.13.1. DO NOT EDIT.
// Code generated from /repo/frostfs/sdk-go/netmap/parser/Query.g4 by ANTLR 4.13.0. DO NOT EDIT.
package parser // Query


@ -82,13 +82,6 @@ CBF 1
SELECT 1 FROM Color
FILTER (Color EQ Red OR Color EQ Blue OR Color EQ Yellow) AND Color NE Green AS Color`,
},
{
name: "non-ascii attributes in SELECT IN",
input: `REP 1
CBF 1
SELECT 1 IN SAME 'Цвет' FROM Colorful
FILTER 'Цвет' EQ 'Красный' OR 'Цвет' EQ 'Синий' AS Colorful`,
},
}
for _, tc := range testCases {
@ -134,11 +127,6 @@ func TestDecodeSelectFilterExpr(t *testing.T) {
SELECT 1 FROM R
FILTER Color LIKE 'R' AS R
`,
`
CBF 1
SELECT 1 IN SAME 'Цвет' FROM Colorful
FILTER 'Цвет' EQ 'Красный' OR 'Цвет' EQ 'Синий' AS Colorful
`,
} {
_, err := DecodeSelectFilterString(s)
require.NoError(t, err)


@ -6,7 +6,6 @@ import (
"encoding/binary"
"fmt"
mrand "math/rand"
"reflect"
"slices"
"strconv"
"strings"
@ -14,7 +13,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
"git.frostfs.info/TrueCloudLab/hrw"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -40,7 +38,7 @@ func BenchmarkHRWSort(b *testing.B) {
b.Run("sort by index, no weight", func(b *testing.B) {
realNodes := make([]nodes, netmapSize)
b.ResetTimer()
for range b.N {
for i := 0; i < b.N; i++ {
b.StopTimer()
copy(realNodes, vectors)
b.StartTimer()
@ -51,7 +49,7 @@ func BenchmarkHRWSort(b *testing.B) {
b.Run("sort by value, no weight", func(b *testing.B) {
realNodes := make([]nodes, netmapSize)
b.ResetTimer()
for range b.N {
for i := 0; i < b.N; i++ {
b.StopTimer()
copy(realNodes, vectors)
b.StartTimer()
@ -62,7 +60,7 @@ func BenchmarkHRWSort(b *testing.B) {
b.Run("only sort by index", func(b *testing.B) {
realNodes := make([]nodes, netmapSize)
b.ResetTimer()
for range b.N {
for i := 0; i < b.N; i++ {
b.StopTimer()
copy(realNodes, vectors)
b.StartTimer()
@ -73,7 +71,7 @@ func BenchmarkHRWSort(b *testing.B) {
b.Run("sort by value", func(b *testing.B) {
realNodes := make([]nodes, netmapSize)
b.ResetTimer()
for range b.N {
for i := 0; i < b.N; i++ {
b.StopTimer()
copy(realNodes, vectors)
b.StartTimer()
@ -84,7 +82,7 @@ func BenchmarkHRWSort(b *testing.B) {
b.Run("sort by ID, then by index (deterministic)", func(b *testing.B) {
realNodes := make([]nodes, netmapSize)
b.ResetTimer()
for range b.N {
for i := 0; i < b.N; i++ {
b.StopTimer()
copy(realNodes, vectors)
b.StartTimer()
@ -136,7 +134,7 @@ func BenchmarkPolicyHRWType(b *testing.B) {
nm.SetNodes(nodes)
b.ResetTimer()
for range b.N {
for i := 0; i < b.N; i++ {
_, err := nm.ContainerNodes(p, []byte{1})
if err != nil {
b.Fatal()
@ -197,7 +195,7 @@ func TestPlacementPolicy_DeterministicOrder(t *testing.T) {
}
a, b := getIndices(t)
for range 10 {
for i := 0; i < 10; i++ {
x, y := getIndices(t)
require.Equal(t, a, x)
require.Equal(t, b, y)
@ -354,7 +352,7 @@ func TestPlacementPolicy_Unique(t *testing.T) {
var nodes []NodeInfo
for i, city := range []string{"Moscow", "Berlin", "Shenzhen"} {
for j := range 3 {
for j := 0; j < 3; j++ {
node := nodeInfoFromAttributes("City", city)
node.SetPublicKey(binary.BigEndian.AppendUint16(nil, uint16(i*4+j)))
nodes = append(nodes, node)
@ -368,7 +366,7 @@ func TestPlacementPolicy_Unique(t *testing.T) {
require.NoError(t, err)
for i, vi := range v {
for _, ni := range vi {
for j := range i {
for j := 0; j < i; j++ {
for _, nj := range v[j] {
require.NotEqual(t, ni.hash, nj.hash)
}
@ -457,7 +455,7 @@ func TestPlacementPolicy_MultiREP(t *testing.T) {
for _, additional := range []int{0, 1, 2} {
t.Run(fmt.Sprintf("unique=%t, additional=%d", unique, additional), func(t *testing.T) {
rs := []ReplicaDescriptor{newReplica(1, "SameRU")}
for range additional {
for i := 0; i < additional; i++ {
rs = append(rs, newReplica(1, ""))
}
@ -544,66 +542,6 @@ func TestPlacementPolicy_ProcessSelectorsExceptForNodes(t *testing.T) {
}
}
func TestPlacementPolicy_NonAsciiAttributes(t *testing.T) {
p := newPlacementPolicy(
1,
[]ReplicaDescriptor{
newReplica(2, "Nodes"),
newReplica(2, "Nodes"),
},
[]Selector{
newSelector("Nodes", "Цвет", 2, "Colorful", (*Selector).SelectSame),
},
[]Filter{
newFilter("Colorful", "", "", netmap.OR,
newFilter("", "Цвет", "Красный", netmap.EQ),
newFilter("", "Цвет", "Синий", netmap.EQ),
),
},
)
p.SetUnique(true)
nodes := []NodeInfo{
nodeInfoFromAttributes("Цвет", "Красный", "Форма", "Треугольник"),
nodeInfoFromAttributes("Цвет", "Красный", "Форма", "Круг"),
nodeInfoFromAttributes("Цвет", "Синий", "Форма", "Треугольник"),
nodeInfoFromAttributes("Цвет", "Синий", "Форма", "Круг"),
nodeInfoFromAttributes("Свойство", "Мягкий", "Форма", "Треугольник"),
nodeInfoFromAttributes("Свойство", "Теплый", "Форма", "Круг"),
}
for i := range nodes {
nodes[i].SetPublicKey([]byte{byte(i)})
}
redNodes := nodes[:2]
blueNodes := nodes[2:4]
var nm NetMap
nm.SetNodes(nodes)
pivot := make([]byte, 42)
_, _ = rand.Read(pivot)
nodesPerReplica, err := nm.ContainerNodes(p, pivot)
require.NoError(t, err)
require.Len(t, nodesPerReplica, 2)
for i := range nodesPerReplica {
slices.SortFunc(nodesPerReplica[i], func(n1, n2 NodeInfo) int {
pk1, pk2 := string(n1.PublicKey()), string(n2.PublicKey())
return cmp.Compare(pk1, pk2)
})
}
redMatchFirst := reflect.DeepEqual(redNodes, nodesPerReplica[0])
blueMatchFirst := reflect.DeepEqual(blueNodes, nodesPerReplica[0])
redMatchSecond := reflect.DeepEqual(redNodes, nodesPerReplica[1])
blueMatchSecond := reflect.DeepEqual(blueNodes, nodesPerReplica[1])
assert.True(t, redMatchFirst && blueMatchSecond || blueMatchFirst && redMatchSecond)
}
func TestSelector_SetName(t *testing.T) {
const name = "some name"
var s Selector


@ -130,7 +130,7 @@ func BenchmarkPlacementPolicyInteropability(b *testing.B) {
b.Run(name, func(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for range b.N {
for i := 0; i < b.N; i++ {
b.StartTimer()
v, err := nm.ContainerNodes(tt.Policy, tt.Pivot)
b.StopTimer()
@ -173,7 +173,7 @@ func BenchmarkManySelects(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for range b.N {
for i := 0; i < b.N; i++ {
_, err = nm.ContainerNodes(tt.Policy, tt.Pivot)
if err != nil {
b.FailNow()


@ -53,7 +53,7 @@ func TestErasureCodeReconstruct(t *testing.T) {
})
t.Run("from parity", func(t *testing.T) {
parts := cloneSlice(parts)
for i := range parityCount {
for i := 0; i < parityCount; i++ {
parts[i] = nil
}
reconstructed, err := c.ReconstructHeader(parts)
@ -138,7 +138,7 @@ func TestErasureCodeReconstruct(t *testing.T) {
})
t.Run("from parity", func(t *testing.T) {
parts := cloneSlice(parts)
for i := range parityCount {
for i := 0; i < parityCount; i++ {
parts[i] = nil
}
reconstructed, err := c.Reconstruct(parts)
@ -180,7 +180,7 @@ func TestErasureCodeReconstruct(t *testing.T) {
t.Run("from parity", func(t *testing.T) {
oldParts := parts
parts := cloneSlice(parts)
for i := range parityCount {
for i := 0; i < parityCount; i++ {
parts[i] = nil
}


@ -61,7 +61,7 @@ func TestID_Equal(t *testing.T) {
func TestID_Parse(t *testing.T) {
t.Run("should parse successful", func(t *testing.T) {
for i := range 10 {
for i := 0; i < 10; i++ {
t.Run(strconv.Itoa(i), func(t *testing.T) {
cs := randSHA256Checksum(t)
str := base58.Encode(cs[:])
@ -78,7 +78,7 @@ func TestID_Parse(t *testing.T) {
})
t.Run("should failure on parse", func(t *testing.T) {
for i := range 10 {
for i := 0; i < 10; i++ {
j := i
t.Run(strconv.Itoa(j), func(t *testing.T) {
cs := []byte{1, 2, 3, 4, 5, byte(j)}
@ -98,7 +98,7 @@ func TestID_String(t *testing.T) {
})
t.Run("should be equal", func(t *testing.T) {
for i := range 10 {
for i := 0; i < 10; i++ {
t.Run(strconv.Itoa(i), func(t *testing.T) {
cs := randSHA256Checksum(t)
str := base58.Encode(cs[:])


@ -3,10 +3,7 @@ package object
import (
"errors"
"fmt"
"slices"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
v2session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
@ -315,23 +312,6 @@ func (o *Object) Attributes() []Attribute {
return res
}
// UserAttributes returns object user attributes.
func (o *Object) UserAttributes() []Attribute {
attrs := (*object.Object)(o).
GetHeader().
GetAttributes()
res := make([]Attribute, 0, len(attrs))
for _, attr := range attrs {
if !strings.HasPrefix(attr.GetKey(), container.SysAttributePrefix) {
res = append(res, *NewAttributeFromV2(&attr))
}
}
return slices.Clip(res)
}
// SetAttributes sets object attributes.
func (o *Object) SetAttributes(v ...Attribute) {
attrs := make([]object.Attribute, len(v))


@ -3,10 +3,8 @@ package object_test
import (
"testing"
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"github.com/stretchr/testify/require"
)
@ -26,26 +24,3 @@ func TestInitCreation(t *testing.T) {
require.Equal(t, cnr, cID)
require.Equal(t, own, o.OwnerID())
}
func Test_Attributes(t *testing.T) {
obj := objecttest.Object()
t.Run("get user attributes", func(t *testing.T) {
// See how we create a test object. It's created with two attributes.
require.Len(t, obj.UserAttributes(), 2)
})
userAttrs := obj.UserAttributes()
sysAttr := *object.NewAttribute()
sysAttr.SetKey(v2container.SysAttributePrefix + "key")
sysAttr.SetValue("value")
attr := append(userAttrs, sysAttr)
obj.SetAttributes(attr...)
t.Run("get attributes", func(t *testing.T) {
require.ElementsMatch(t, obj.UserAttributes(), userAttrs)
require.ElementsMatch(t, obj.Attributes(), attr)
})
}


@ -14,7 +14,7 @@ func generateIDList(sz int) []oid.ID {
res := make([]oid.ID, sz)
cs := [sha256.Size]byte{}
for i := range sz {
for i := 0; i < sz; i++ {
var oID oid.ID
res[i] = oID


@ -72,7 +72,7 @@ func TestTransformer(t *testing.T) {
require.Equal(t, ids.ParentID, &parID)
children := tt.objects[i].Children()
for j := range i {
for j := 0; j < i; j++ {
id, ok := tt.objects[j].ID()
require.True(t, ok)
require.Equal(t, id, children[j])
@ -152,7 +152,7 @@ func benchmarkTransformer(b *testing.B, header *objectSDK.Object, payloadSize, s
b.ReportAllocs()
b.ResetTimer()
for range b.N {
for i := 0; i < b.N; i++ {
f, _ := newPayloadSizeLimiter(maxSize, uint64(sizeHint), func() ObjectWriter { return benchTarget{} })
if err := f.WriteHeader(ctx, header); err != nil {
b.Fatalf("write header: %v", err)


@ -26,15 +26,11 @@ type mockClient struct {
errorOnDial bool
errorOnCreateSession bool
errorOnEndpointInfo error
resOnEndpointInfo netmap.NodeInfo
healthcheckFn func()
errorOnEndpointInfo bool
errorOnNetworkInfo bool
stOnGetObject apistatus.Status
}
var _ client = (*mockClient)(nil)
func newMockClient(addr string, key ecdsa.PrivateKey) *mockClient {
return &mockClient{
key: key,
@ -42,16 +38,6 @@ func newMockClient(addr string, key ecdsa.PrivateKey) *mockClient {
}
}
func newMockClientHealthy(addr string, key ecdsa.PrivateKey, healthy bool) *mockClient {
m := newMockClient(addr, key)
if healthy {
m.setHealthy()
} else {
m.setUnhealthy()
}
return m
}
func (m *mockClient) setThreshold(threshold uint32) {
m.errorThreshold = threshold
}
@ -61,11 +47,11 @@ func (m *mockClient) errOnCreateSession() {
}
func (m *mockClient) errOnEndpointInfo() {
m.errorOnEndpointInfo = errors.New("error")
m.errorOnEndpointInfo = true
}
func (m *mockClient) errOnNetworkInfo() {
m.errorOnEndpointInfo = errors.New("error")
m.errorOnEndpointInfo = true
}
func (m *mockClient) errOnDial() {
@ -108,32 +94,27 @@ func (m *mockClient) containerDelete(context.Context, PrmContainerDelete) error
return nil
}
func (m *mockClient) apeManagerAddChain(context.Context, PrmAddAPEChain) error {
func (c *mockClient) apeManagerAddChain(ctx context.Context, prm PrmAddAPEChain) error {
return nil
}
func (m *mockClient) apeManagerRemoveChain(context.Context, PrmRemoveAPEChain) error {
func (c *mockClient) apeManagerRemoveChain(ctx context.Context, prm PrmRemoveAPEChain) error {
return nil
}
func (m *mockClient) apeManagerListChains(context.Context, PrmListAPEChains) ([]ape.Chain, error) {
func (c *mockClient) apeManagerListChains(ctx context.Context, prm PrmListAPEChains) ([]ape.Chain, error) {
return []ape.Chain{}, nil
}
func (m *mockClient) endpointInfo(ctx context.Context, _ prmEndpointInfo) (netmap.NodeInfo, error) {
if m.errorOnEndpointInfo != nil {
return netmap.NodeInfo{}, m.handleError(ctx, nil, m.errorOnEndpointInfo)
var ni netmap.NodeInfo
if m.errorOnEndpointInfo {
return ni, m.handleError(ctx, nil, errors.New("error"))
}
m.resOnEndpointInfo.SetNetworkEndpoints(m.addr)
return m.resOnEndpointInfo, nil
}
func (m *mockClient) healthcheck(ctx context.Context) (netmap.NodeInfo, error) {
if m.healthcheckFn != nil {
m.healthcheckFn()
}
return m.endpointInfo(ctx, prmEndpointInfo{})
ni.SetNetworkEndpoints(m.addr)
return ni, nil
}
func (m *mockClient) networkInfo(ctx context.Context, _ prmNetworkInfo) (netmap.NetworkInfo, error) {
@ -209,12 +190,16 @@ func (m *mockClient) dial(context.Context) error {
return nil
}
func (m *mockClient) restart(context.Context) error {
if m.errorOnDial {
return errors.New("restart dial error")
func (m *mockClient) restartIfUnhealthy(ctx context.Context) (changed bool, err error) {
_, err = m.endpointInfo(ctx, prmEndpointInfo{})
healthy := err == nil
changed = healthy != m.isHealthy()
if healthy {
m.setHealthy()
} else {
m.setUnhealthy()
}
return nil
return
}
func (m *mockClient) close() error {


@ -57,8 +57,6 @@ type client interface {
apeManagerListChains(context.Context, PrmListAPEChains) ([]ape.Chain, error)
// see clientWrapper.endpointInfo.
endpointInfo(context.Context, prmEndpointInfo) (netmap.NodeInfo, error)
// see clientWrapper.healthcheck.
healthcheck(ctx context.Context) (netmap.NodeInfo, error)
// see clientWrapper.networkInfo.
networkInfo(context.Context, prmNetworkInfo) (netmap.NetworkInfo, error)
// see clientWrapper.netMapSnapshot
@ -84,8 +82,8 @@ type client interface {
// see clientWrapper.dial.
dial(ctx context.Context) error
// see clientWrapper.restart.
restart(ctx context.Context) error
// see clientWrapper.restartIfUnhealthy.
restartIfUnhealthy(ctx context.Context) (bool, error)
// see clientWrapper.close.
close() error
}
@ -94,10 +92,10 @@ type client interface {
type clientStatus interface {
// isHealthy checks if the connection can handle requests.
isHealthy() bool
// isDialed checks if the connection was created.
isDialed() bool
// setUnhealthy marks client as unhealthy.
setUnhealthy()
// setHealthy marks client as healthy.
setHealthy()
// address returns the address of the endpoint.
address() string
// currentErrorRate returns current errors rate.
@ -128,10 +126,15 @@ type clientStatusMonitor struct {
// values for healthy status of clientStatusMonitor.
const (
// statusUnhealthyOnDial is set when dialing the endpoint has failed,
// so there is no connection to the endpoint, and the pool must not try to close it
// before re-establishing the connection.
statusUnhealthyOnDial = iota
// statusUnhealthyOnRequest is set when communication with the endpoint fails
// after dialing, due to immediate or accumulated errors; the connection is still
// available and the pool should close it before re-establishing the connection.
statusUnhealthyOnRequest = iota
statusUnhealthyOnRequest
// statusHealthy is set when connection is ready to be used by the pool.
statusHealthy
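
As a rough illustration of how these three values behave (a minimal sketch with hypothetical names, assuming the monitor keeps the status in an atomic.Uint32 much like clientStatusMonitor does): isDialed is true for any state other than statusUnhealthyOnDial, while isHealthy is true only for statusHealthy.

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	statusUnhealthyOnDial = iota
	statusUnhealthyOnRequest
	statusHealthy
)

// healthState is a hypothetical stand-in for the monitor's healthy field.
type healthState struct{ healthy atomic.Uint32 }

// isHealthy: the connection is ready to handle requests.
func (h *healthState) isHealthy() bool { return h.healthy.Load() == statusHealthy }

// isDialed: a connection exists, so it has to be closed before re-dialing.
func (h *healthState) isDialed() bool { return h.healthy.Load() != statusUnhealthyOnDial }

func main() {
	var h healthState
	for _, s := range []uint32{statusUnhealthyOnDial, statusUnhealthyOnRequest, statusHealthy} {
		h.healthy.Store(s)
		fmt.Println(s, "dialed:", h.isDialed(), "healthy:", h.isHealthy())
	}
}
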
@ -230,7 +233,6 @@ func newClientStatusMonitor(logger *zap.Logger, addr string, errorThreshold uint
type clientWrapper struct {
clientMutex sync.RWMutex
client *sdkClient.Client
dialed bool
prm wrapperPrm
clientStatusMonitor
@ -340,17 +342,30 @@ func (c *clientWrapper) dial(ctx context.Context) error {
GRPCDialOptions: c.prm.dialOptions,
}
err = cl.Dial(ctx, prmDial)
c.setDialed(err == nil)
if err != nil {
if err = cl.Dial(ctx, prmDial); err != nil {
c.setUnhealthyOnDial()
return err
}
return nil
}
// restart recreates and redials the inner sdk client.
func (c *clientWrapper) restart(ctx context.Context) error {
// restartIfUnhealthy checks the health status of the client and recreates it if the status is unhealthy.
// It reports whether the status was changed by this call and returns the error that caused the unhealthy status.
func (c *clientWrapper) restartIfUnhealthy(ctx context.Context) (changed bool, err error) {
var wasHealthy bool
if _, err = c.endpointInfo(ctx, prmEndpointInfo{}); err == nil {
return false, nil
} else if !errors.Is(err, errPoolClientUnhealthy) {
wasHealthy = true
}
// if the connection was dialed before, the pool has to close it and then
// initialize it once again to avoid a goroutine / connection leak.
if c.isDialed() {
c.scheduleGracefulClose()
}
var cl sdkClient.Client
prmInit := sdkClient.PrmInit{
Key: c.prm.key,
@ -366,35 +381,22 @@ func (c *clientWrapper) restart(ctx context.Context) error {
GRPCDialOptions: c.prm.dialOptions,
}
// if the connection was dialed before, the pool has to close it and then
// initialize it once again to avoid a goroutine / connection leak.
if c.isDialed() {
c.scheduleGracefulClose()
}
err := cl.Dial(ctx, prmDial)
c.setDialed(err == nil)
if err != nil {
return err
if err = cl.Dial(ctx, prmDial); err != nil {
c.setUnhealthyOnDial()
return wasHealthy, err
}
c.clientMutex.Lock()
c.client = &cl
c.clientMutex.Unlock()
return nil
}
if _, err = cl.EndpointInfo(ctx, sdkClient.PrmEndpointInfo{}); err != nil {
c.setUnhealthy()
return wasHealthy, err
}
func (c *clientWrapper) isDialed() bool {
c.mu.RLock()
defer c.mu.RUnlock()
return c.dialed
}
func (c *clientWrapper) setDialed(dialed bool) {
c.mu.Lock()
c.dialed = dialed
c.mu.Unlock()
c.setHealthy()
return !wasHealthy, nil
}
func (c *clientWrapper) getClient() (*sdkClient.Client, error) {
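
For context, a sketch of how a caller could drive restartIfUnhealthy on a timer; the checker interface and fakeClient below are hypothetical stand-ins that only mirror the (changed, error) contract, not the pool's actual types.

package main

import (
	"context"
	"errors"
	"log"
	"time"
)

// checker mirrors the (changed, error) contract of restartIfUnhealthy;
// the interface and fakeClient are for illustration only.
type checker interface {
	restartIfUnhealthy(ctx context.Context) (bool, error)
	address() string
}

type fakeClient struct{ healthy bool }

func (f *fakeClient) address() string { return "peer0" }

func (f *fakeClient) restartIfUnhealthy(context.Context) (bool, error) {
	f.healthy = !f.healthy // flip health every call so transitions are visible
	if !f.healthy {
		return true, errors.New("dial failed")
	}
	return true, nil
}

// monitor re-checks the client on every tick and logs transitions only,
// which is roughly how a rebalance loop would consume the result.
func monitor(ctx context.Context, c checker, interval time.Duration) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			changed, err := c.restartIfUnhealthy(ctx)
			if changed {
				log.Printf("node %s healthy=%v err=%v", c.address(), err == nil, err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	monitor(ctx, &fakeClient{}, 200*time.Millisecond)
}
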
@ -652,15 +654,6 @@ func (c *clientWrapper) endpointInfo(ctx context.Context, _ prmEndpointInfo) (ne
return netmap.NodeInfo{}, err
}
return c.endpointInfoRaw(ctx, cl)
}
func (c *clientWrapper) healthcheck(ctx context.Context) (netmap.NodeInfo, error) {
cl := c.getClientRaw()
return c.endpointInfoRaw(ctx, cl)
}
func (c *clientWrapper) endpointInfoRaw(ctx context.Context, cl *sdkClient.Client) (netmap.NodeInfo, error) {
start := time.Now()
res, err := cl.EndpointInfo(ctx, sdkClient.PrmEndpointInfo{})
c.incRequests(time.Since(start), methodEndpointInfo)
@ -1092,7 +1085,7 @@ func (c *clientWrapper) objectSearch(ctx context.Context, prm PrmObjectSearch) (
return ResObjectSearch{}, fmt.Errorf("init object searching on client: %w", err)
}
return ResObjectSearch{r: res, handleError: c.handleError}, nil
return ResObjectSearch{r: res}, nil
}
// sessionCreate invokes sdkClient.SessionCreate, parses the response status into an error and returns the result as is.
@ -1128,6 +1121,10 @@ func (c *clientStatusMonitor) isHealthy() bool {
return c.healthy.Load() == statusHealthy
}
func (c *clientStatusMonitor) isDialed() bool {
return c.healthy.Load() != statusUnhealthyOnDial
}
func (c *clientStatusMonitor) setHealthy() {
c.healthy.Store(statusHealthy)
}
@ -1136,6 +1133,10 @@ func (c *clientStatusMonitor) setUnhealthy() {
c.healthy.Store(statusUnhealthyOnRequest)
}
func (c *clientStatusMonitor) setUnhealthyOnDial() {
c.healthy.Store(statusUnhealthyOnDial)
}
func (c *clientStatusMonitor) address() string {
return c.addr
}
@ -1158,16 +1159,6 @@ func (c *clientStatusMonitor) incErrorRate() {
}
}
func (c *clientStatusMonitor) incErrorRateToUnhealthy(err error) {
c.mu.Lock()
c.currentErrorCount = 0
c.overallErrorCount++
c.setUnhealthy()
c.mu.Unlock()
c.log(zapcore.WarnLevel, "explicitly mark node unhealthy", zap.String("address", c.addr), zap.Error(err))
}
func (c *clientStatusMonitor) log(level zapcore.Level, msg string, fields ...zap.Field) {
if c.logger == nil {
return
@ -1210,9 +1201,6 @@ func (c *clientWrapper) incRequests(elapsed time.Duration, method MethodIndex) {
}
func (c *clientWrapper) close() error {
if !c.isDialed() {
return nil
}
if cl := c.getClientRaw(); cl != nil {
return cl.Close()
}
@ -1237,10 +1225,9 @@ func (c *clientStatusMonitor) handleError(ctx context.Context, st apistatus.Stat
switch stErr.(type) {
case *apistatus.ServerInternal,
*apistatus.WrongMagicNumber,
*apistatus.SignatureVerification:
*apistatus.SignatureVerification,
*apistatus.NodeUnderMaintenance:
c.incErrorRate()
case *apistatus.NodeUnderMaintenance:
c.incErrorRateToUnhealthy(stErr)
}
if err == nil {
@ -1252,11 +1239,7 @@ func (c *clientStatusMonitor) handleError(ctx context.Context, st apistatus.Stat
if err != nil {
if needCountError(ctx, err) {
if sdkClient.IsErrNodeUnderMaintenance(err) {
c.incErrorRateToUnhealthy(err)
} else {
c.incErrorRate()
}
c.incErrorRate()
}
return err
@ -1278,7 +1261,7 @@ func needCountError(ctx context.Context, err error) bool {
return false
}
if ctx != nil && errors.Is(ctx.Err(), context.Canceled) {
if errors.Is(ctx.Err(), context.Canceled) {
return false
}
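
To illustrate the error accounting that handleError feeds into, here is a simplified, hypothetical counter: every error that needCountError approves bumps the current and overall counters, and crossing the threshold resets the current counter and marks the client unhealthy.

package main

import (
	"fmt"
	"sync"
)

// errCounter is a hypothetical, simplified take on clientStatusMonitor:
// each counted error bumps both counters, and reaching the threshold
// resets the current counter and marks the client unhealthy.
type errCounter struct {
	mu                sync.Mutex
	currentErrorCount uint32
	overallErrorCount uint64
	errorThreshold    uint32
	healthy           bool
}

func (c *errCounter) incErrorRate() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.currentErrorCount++
	c.overallErrorCount++
	if c.errorThreshold > 0 && c.currentErrorCount >= c.errorThreshold {
		c.currentErrorCount = 0
		c.healthy = false
	}
}

func main() {
	c := &errCounter{errorThreshold: 3, healthy: true}
	for i := 0; i < 3; i++ {
		c.incErrorRate()
	}
	fmt.Println("healthy:", c.healthy, "overall:", c.overallErrorCount) // healthy: false overall: 3
}
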
@ -2155,9 +2138,7 @@ func adjustNodeParams(nodeParams []NodeParam) ([]*nodesParam, error) {
// startRebalance runs a loop that monitors connection health status.
func (p *Pool) startRebalance(ctx context.Context) {
ticker := time.NewTicker(p.rebalanceParams.clientRebalanceInterval)
defer ticker.Stop()
ticker := time.NewTimer(p.rebalanceParams.clientRebalanceInterval)
buffers := make([][]float64, len(p.rebalanceParams.nodesParams))
for i, params := range p.rebalanceParams.nodesParams {
buffers[i] = make([]float64, len(params.weights))
@ -2207,7 +2188,7 @@ func (p *Pool) updateInnerNodesHealth(ctx context.Context, i int, bufferWeights
tctx, c := context.WithTimeout(ctx, options.nodeRequestTimeout)
defer c()
changed, err := restartIfUnhealthy(tctx, cli)
changed, err := cli.restartIfUnhealthy(tctx)
healthy := err == nil
if healthy {
bufferWeights[j] = options.nodesParams[i].weights[j]
@ -2238,43 +2219,6 @@ func (p *Pool) updateInnerNodesHealth(ctx context.Context, i int, bufferWeights
}
}
// restartIfUnhealthy checks the health status of the client and recreates it if the status is unhealthy.
// It reports whether the status was changed by this call and returns the error that caused the unhealthy status.
func restartIfUnhealthy(ctx context.Context, c client) (changed bool, err error) {
defer func() {
if err != nil {
c.setUnhealthy()
} else {
c.setHealthy()
}
}()
wasHealthy := c.isHealthy()
if res, err := c.healthcheck(ctx); err == nil {
if res.Status().IsMaintenance() {
return wasHealthy, new(apistatus.NodeUnderMaintenance)
}
return !wasHealthy, nil
}
if err = c.restart(ctx); err != nil {
return wasHealthy, err
}
res, err := c.healthcheck(ctx)
if err != nil {
return wasHealthy, err
}
if res.Status().IsMaintenance() {
return wasHealthy, new(apistatus.NodeUnderMaintenance)
}
return !wasHealthy, nil
}
func adjustWeights(weights []float64) []float64 {
adjusted := make([]float64, len(weights))
sum := 0.0
@ -2312,7 +2256,7 @@ func (p *innerPool) connection() (client, error) {
return nil, errors.New("no healthy client")
}
attempts := 3 * len(p.clients)
for range attempts {
for k := 0; k < attempts; k++ {
i := p.sampler.Next()
if cp := p.clients[i]; cp.isHealthy() {
return cp, nil
@ -2764,25 +2708,18 @@ func (p *Pool) ObjectRange(ctx context.Context, prm PrmObjectRange) (ResObjectRa
//
// Must be initialized using Pool.SearchObjects; any other usage is unsafe.
type ResObjectSearch struct {
r *sdkClient.ObjectListReader
handleError func(context.Context, apistatus.Status, error) error
r *sdkClient.ObjectListReader
}
// Read reads another list of the object identifiers.
func (x *ResObjectSearch) Read(buf []oid.ID) (int, error) {
n, ok := x.r.Read(buf)
if !ok {
res, err := x.r.Close()
_, err := x.r.Close()
if err == nil {
return n, io.EOF
}
var status apistatus.Status
if res != nil {
status = res.Status()
}
err = x.handleError(nil, status, err)
return n, err
}
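
A small sketch of how Read is typically consumed, draining identifiers in fixed-size batches until io.EOF; the idReader interface, ID type and sliceReader below are hypothetical stand-ins, not the SDK's types.

package main

import (
	"errors"
	"fmt"
	"io"
)

// ID stands in for oid.ID in this sketch.
type ID [32]byte

// idReader mirrors ResObjectSearch.Read's signature.
type idReader interface {
	Read(buf []ID) (int, error)
}

// drain collects every identifier the reader yields until io.EOF.
func drain(r idReader) ([]ID, error) {
	var out []ID
	buf := make([]ID, 100)
	for {
		n, err := r.Read(buf)
		out = append(out, buf[:n]...)
		if errors.Is(err, io.EOF) {
			return out, nil
		}
		if err != nil {
			return out, err
		}
	}
}

// sliceReader serves identifiers from memory, signalling io.EOF on the last batch.
type sliceReader struct{ ids []ID }

func (s *sliceReader) Read(buf []ID) (int, error) {
	n := copy(buf, s.ids)
	s.ids = s.ids[n:]
	if len(s.ids) == 0 {
		return n, io.EOF
	}
	return n, nil
}

func main() {
	ids, err := drain(&sliceReader{ids: make([]ID, 250)})
	fmt.Println(len(ids), err) // 250 <nil>
}
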
@ -3080,7 +3017,9 @@ func (p *Pool) Close() {
// close all clients
for _, pools := range p.innerPools {
for _, cli := range pools.clients {
_ = cli.close()
if cli.isDialed() {
_ = cli.close()
}
}
}
}

View file

@ -4,13 +4,12 @@ import (
"context"
"crypto/ecdsa"
"errors"
"math/rand"
"strconv"
"testing"
"time"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
@ -18,8 +17,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
"go.uber.org/zap/zaptest/observer"
)
func TestBuildPoolClientFailed(t *testing.T) {
@ -225,7 +222,7 @@ func TestOneOfTwoFailed(t *testing.T) {
time.Sleep(2 * time.Second)
for range 5 {
for i := 0; i < 5; i++ {
cp, err := pool.connection()
require.NoError(t, err)
st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false))
@ -233,179 +230,6 @@ func TestOneOfTwoFailed(t *testing.T) {
}
}
func TestUpdateNodesHealth(t *testing.T) {
ctx := context.Background()
key := newPrivateKey(t)
for _, tc := range []struct {
name string
wasHealthy bool
willHealthy bool
prepareCli func(*mockClient)
}{
{
name: "healthy, maintenance, unhealthy",
wasHealthy: true,
willHealthy: false,
prepareCli: func(c *mockClient) { c.resOnEndpointInfo.SetStatus(netmap.Maintenance) },
},
{
name: "unhealthy, maintenance, unhealthy",
wasHealthy: false,
willHealthy: false,
prepareCli: func(c *mockClient) { c.resOnEndpointInfo.SetStatus(netmap.Maintenance) },
},
{
name: "healthy, no error, healthy",
wasHealthy: true,
willHealthy: true,
prepareCli: func(c *mockClient) { c.resOnEndpointInfo.SetStatus(netmap.Online) },
},
{
name: "unhealthy, no error, healthy",
wasHealthy: false,
willHealthy: true,
prepareCli: func(c *mockClient) { c.resOnEndpointInfo.SetStatus(netmap.Online) },
},
{
name: "healthy, error, failed restart, unhealthy",
wasHealthy: true,
willHealthy: false,
prepareCli: func(c *mockClient) {
c.errOnEndpointInfo()
c.errorOnDial = true
},
},
{
name: "unhealthy, error, failed restart, unhealthy",
wasHealthy: false,
willHealthy: false,
prepareCli: func(c *mockClient) {
c.errOnEndpointInfo()
c.errorOnDial = true
},
},
{
name: "healthy, error, restart, error, unhealthy",
wasHealthy: true,
willHealthy: false,
prepareCli: func(c *mockClient) { c.errOnEndpointInfo() },
},
{
name: "unhealthy, error, restart, error, unhealthy",
wasHealthy: false,
willHealthy: false,
prepareCli: func(c *mockClient) { c.errOnEndpointInfo() },
},
{
name: "healthy, error, restart, maintenance, unhealthy",
wasHealthy: true,
willHealthy: false,
prepareCli: func(c *mockClient) {
healthError := true
c.healthcheckFn = func() {
if healthError {
c.errorOnEndpointInfo = errors.New("error")
} else {
c.errorOnEndpointInfo = nil
c.resOnEndpointInfo.SetStatus(netmap.Maintenance)
}
healthError = !healthError
}
},
},
{
name: "unhealthy, error, restart, maintenance, unhealthy",
wasHealthy: false,
willHealthy: false,
prepareCli: func(c *mockClient) {
healthError := true
c.healthcheckFn = func() {
if healthError {
c.errorOnEndpointInfo = errors.New("error")
} else {
c.errorOnEndpointInfo = nil
c.resOnEndpointInfo.SetStatus(netmap.Maintenance)
}
healthError = !healthError
}
},
},
{
name: "healthy, error, restart, healthy",
wasHealthy: true,
willHealthy: true,
prepareCli: func(c *mockClient) {
healthError := true
c.healthcheckFn = func() {
if healthError {
c.errorOnEndpointInfo = errors.New("error")
} else {
c.errorOnEndpointInfo = nil
}
healthError = !healthError
}
},
},
{
name: "unhealthy, error, restart, healthy",
wasHealthy: false,
willHealthy: true,
prepareCli: func(c *mockClient) {
healthError := true
c.healthcheckFn = func() {
if healthError {
c.errorOnEndpointInfo = errors.New("error")
} else {
c.errorOnEndpointInfo = nil
}
healthError = !healthError
}
},
},
} {
t.Run(tc.name, func(t *testing.T) {
cli := newMockClientHealthy("peer0", *key, tc.wasHealthy)
tc.prepareCli(cli)
p, log := newPool(t, cli)
p.updateNodesHealth(ctx, [][]float64{{1}})
changed := tc.wasHealthy != tc.willHealthy
require.Equalf(t, tc.willHealthy, cli.isHealthy(), "healthy status should be: %v", tc.willHealthy)
require.Equalf(t, changed, 1 == log.Len(), "healthy status should be changed: %v", changed)
})
}
}
func newPool(t *testing.T, cli *mockClient) (*Pool, *observer.ObservedLogs) {
log, observedLog := getObservedLogger()
cache, err := newCache(0)
require.NoError(t, err)
return &Pool{
innerPools: []*innerPool{{
sampler: newSampler([]float64{1}, rand.NewSource(0)),
clients: []client{cli},
}},
cache: cache,
key: newPrivateKey(t),
closedCh: make(chan struct{}),
rebalanceParams: rebalanceParameters{
nodesParams: []*nodesParam{{1, []string{"peer0"}, []float64{1}}},
nodeRequestTimeout: time.Second,
clientRebalanceInterval: 200 * time.Millisecond,
},
logger: log,
}, observedLog
}
func getObservedLogger() (*zap.Logger, *observer.ObservedLogs) {
loggerCore, observedLog := observer.New(zap.DebugLevel)
return zap.New(loggerCore), observedLog
}
func TestTwoFailed(t *testing.T) {
var clientKeys []*ecdsa.PrivateKey
mockClientBuilder := func(addr string) client {
@ -690,7 +514,7 @@ func TestStatusMonitor(t *testing.T) {
monitor.errorThreshold = 3
count := 10
for range count {
for i := 0; i < count; i++ {
monitor.incErrorRate()
}
@ -705,6 +529,13 @@ func TestStatusMonitor(t *testing.T) {
isHealthy bool
description string
}{
{
action: func(m *clientStatusMonitor) { m.setUnhealthyOnDial() },
status: statusUnhealthyOnDial,
isDialed: false,
isHealthy: false,
description: "set unhealthy on dial",
},
{
action: func(m *clientStatusMonitor) { m.setUnhealthy() },
status: statusUnhealthyOnRequest,
@ -723,6 +554,7 @@ func TestStatusMonitor(t *testing.T) {
for _, tc := range cases {
tc.action(&monitor)
require.Equal(t, tc.status, monitor.healthy.Load())
require.Equal(t, tc.isDialed, monitor.isDialed())
require.Equal(t, tc.isHealthy, monitor.isHealthy())
}
})
@ -730,22 +562,19 @@ func TestStatusMonitor(t *testing.T) {
func TestHandleError(t *testing.T) {
ctx := context.Background()
log := zaptest.NewLogger(t)
monitor := newClientStatusMonitor(zap.NewExample(), "", 10)
canceledCtx, cancel := context.WithCancel(context.Background())
cancel()
for _, tc := range []struct {
name string
ctx context.Context
status apistatus.Status
err error
expectedError bool
countError bool
markedUnhealthy bool
for i, tc := range []struct {
ctx context.Context
status apistatus.Status
err error
expectedError bool
countError bool
}{
{
name: "no error, no status",
ctx: ctx,
status: nil,
err: nil,
@ -753,7 +582,6 @@ func TestHandleError(t *testing.T) {
countError: false,
},
{
name: "no error, success status",
ctx: ctx,
status: new(apistatus.SuccessDefaultV2),
err: nil,
@ -761,7 +589,6 @@ func TestHandleError(t *testing.T) {
countError: false,
},
{
name: "error, success status",
ctx: ctx,
status: new(apistatus.SuccessDefaultV2),
err: errors.New("error"),
@ -769,7 +596,6 @@ func TestHandleError(t *testing.T) {
countError: true,
},
{
name: "error, no status",
ctx: ctx,
status: nil,
err: errors.New("error"),
@ -777,7 +603,6 @@ func TestHandleError(t *testing.T) {
countError: true,
},
{
name: "no error, object not found status",
ctx: ctx,
status: new(apistatus.ObjectNotFound),
err: nil,
@ -785,7 +610,6 @@ func TestHandleError(t *testing.T) {
countError: false,
},
{
name: "object not found error, object not found status",
ctx: ctx,
status: new(apistatus.ObjectNotFound),
err: &apistatus.ObjectNotFound{},
@ -793,7 +617,6 @@ func TestHandleError(t *testing.T) {
countError: false,
},
{
name: "eacl not found error, no status",
ctx: ctx,
status: nil,
err: &apistatus.EACLNotFound{},
@ -804,7 +627,6 @@ func TestHandleError(t *testing.T) {
countError: true,
},
{
name: "no error, internal status",
ctx: ctx,
status: new(apistatus.ServerInternal),
err: nil,
@ -812,7 +634,6 @@ func TestHandleError(t *testing.T) {
countError: true,
},
{
name: "no error, wrong magic status",
ctx: ctx,
status: new(apistatus.WrongMagicNumber),
err: nil,
@ -820,7 +641,6 @@ func TestHandleError(t *testing.T) {
countError: true,
},
{
name: "no error, signature verification status",
ctx: ctx,
status: new(apistatus.SignatureVerification),
err: nil,
@ -828,33 +648,20 @@ func TestHandleError(t *testing.T) {
countError: true,
},
{
name: "no error, maintenance status",
ctx: ctx,
status: new(apistatus.NodeUnderMaintenance),
err: nil,
expectedError: true,
countError: true,
markedUnhealthy: true,
},
{
name: "maintenance error, no status",
ctx: ctx,
status: nil,
err: &apistatus.NodeUnderMaintenance{},
expectedError: true,
countError: true,
markedUnhealthy: true,
},
{
name: "no error, invalid argument status",
ctx: ctx,
status: new(apistatus.InvalidArgument),
status: new(apistatus.SignatureVerification),
err: nil,
expectedError: true,
countError: false,
countError: true,
},
{
ctx: ctx,
status: new(apistatus.NodeUnderMaintenance),
err: nil,
expectedError: true,
countError: true,
},
{
name: "context canceled error, no status",
ctx: canceledCtx,
status: nil,
err: errors.New("error"),
@ -862,9 +669,8 @@ func TestHandleError(t *testing.T) {
countError: false,
},
} {
t.Run(tc.name, func(t *testing.T) {
monitor := newClientStatusMonitor(log, "", 10)
errCount := monitor.overallErrorRate()
t.Run(strconv.Itoa(i), func(t *testing.T) {
errCount := monitor.currentErrorRate()
err := monitor.handleError(tc.ctx, tc.status, tc.err)
if tc.expectedError {
require.Error(t, err)
@ -874,10 +680,7 @@ func TestHandleError(t *testing.T) {
if tc.countError {
errCount++
}
require.Equal(t, errCount, monitor.overallErrorRate())
if tc.markedUnhealthy {
require.False(t, monitor.isHealthy())
}
require.Equal(t, errCount, monitor.currentErrorRate())
})
}
}
@ -921,7 +724,7 @@ func TestSwitchAfterErrorThreshold(t *testing.T) {
require.NoError(t, err)
t.Cleanup(pool.Close)
for range errorThreshold {
for i := 0; i < errorThreshold; i++ {
conn, err := pool.connection()
require.NoError(t, err)
require.Equal(t, nodes[0].address, conn.address())

View file

@ -30,7 +30,7 @@ func newSampler(probabilities []float64, source rand.Source) *sampler {
sampler.alias = make([]int, n)
// Compute scaled probabilities.
p := make([]float64, n)
for i := range n {
for i := 0; i < n; i++ {
p[i] = probabilities[i] * float64(n)
}
for i, pi := range p {
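
The scaled probabilities and alias table here look like Vose's alias method, which gives O(1) draws. For comparison, a naive weighted pick with a cumulative scan (a hypothetical helper, not the pool's sampler) produces the same distribution in O(n) per draw.

package main

import (
	"fmt"
	"math/rand"
)

// weightedIndex picks an index with probability proportional to weights
// using a cumulative scan, O(n) per draw; an alias table reaches the same
// distribution in O(1) per draw.
func weightedIndex(r *rand.Rand, weights []float64) int {
	var sum float64
	for _, w := range weights {
		sum += w
	}
	x := r.Float64() * sum
	for i, w := range weights {
		x -= w
		if x <= 0 {
			return i
		}
	}
	return len(weights) - 1
}

func main() {
	r := rand.New(rand.NewSource(0))
	counts := make([]int, 3)
	for i := 0; i < 100000; i++ {
		counts[weightedIndex(r, []float64{0.7, 0.2, 0.1})]++
	}
	fmt.Println(counts) // roughly 70000, 20000, 10000
}
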

View file

@ -32,7 +32,7 @@ func TestSamplerStability(t *testing.T) {
for _, tc := range cases {
sampler := newSampler(tc.probabilities, rand.NewSource(0))
res := make([]int, len(tc.probabilities))
for range COUNT {
for i := 0; i < COUNT; i++ {
res[sampler.Next()]++
}

View file

@ -43,7 +43,7 @@ func (c *treeClient) dial(ctx context.Context) error {
}
var err error
if c.conn, c.service, err = createClient(c.address, c.opts...); err != nil {
if c.conn, c.service, err = dialClient(ctx, c.address, c.opts...); err != nil {
return err
}
@ -61,7 +61,7 @@ func (c *treeClient) redialIfNecessary(ctx context.Context) (healthHasChanged bo
defer c.mu.Unlock()
if c.conn == nil {
if c.conn, c.service, err = createClient(c.address, c.opts...); err != nil {
if c.conn, c.service, err = dialClient(ctx, c.address, c.opts...); err != nil {
return false, err
}
}
@ -77,7 +77,7 @@ func (c *treeClient) redialIfNecessary(ctx context.Context) (healthHasChanged bo
return !wasHealthy, nil
}
func createClient(addr string, clientOptions ...grpc.DialOption) (*grpc.ClientConn, grpcService.TreeServiceClient, error) {
func dialClient(ctx context.Context, addr string, clientOptions ...grpc.DialOption) (*grpc.ClientConn, grpcService.TreeServiceClient, error) {
host, tlsEnable, err := apiClient.ParseURI(addr)
if err != nil {
return nil, nil, fmt.Errorf("parse address: %w", err)
@ -93,9 +93,9 @@ func createClient(addr string, clientOptions ...grpc.DialOption) (*grpc.ClientCo
// the order matters: we want the client to be able to overwrite options.
opts := append(options, clientOptions...)
conn, err := grpc.NewClient(host, opts...)
conn, err := grpc.DialContext(ctx, host, opts...)
if err != nil {
return nil, nil, fmt.Errorf("grpc create node tree service: %w", err)
return nil, nil, fmt.Errorf("grpc dial node tree service: %w", err)
}
return conn, grpcService.NewTreeServiceClient(conn), nil
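
A bare-bones sketch of context-aware gRPC dialing of the kind dialClient performs; the insecure credentials and the grpc.WithBlock choice here are assumptions made for the example, not the SDK's exact option set.

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialTree opens a client connection to a tree service endpoint.
// grpc.WithBlock makes DialContext honour ctx for the connection attempt
// itself; without it the call returns immediately and connects lazily.
func dialTree(ctx context.Context, host string) (*grpc.ClientConn, error) {
	conn, err := grpc.DialContext(ctx, host,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
	)
	if err != nil {
		return nil, fmt.Errorf("grpc dial node tree service: %w", err)
	}
	return conn, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	conn, err := dialTree(ctx, "localhost:8080")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected")
}
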

View file

@ -415,7 +415,7 @@ func (p *Pool) GetNodes(ctx context.Context, prm GetNodesParams) ([]*grpcService
// An empty result is expected due to delayed tree service sync.
// Return an error here to trigger a retry and ignore it afterwards,
// to keep compatibility with the 'GetNodeByPath' implementation.
if inErr == nil && len(resp.GetBody().GetNodes()) == 0 {
if inErr == nil && len(resp.Body.Nodes) == 0 {
return errNodeEmptyResult
}
return handleError("failed to get node by path", inErr)
@ -437,14 +437,14 @@ type SubTreeReader struct {
// Read reads another list of the subtree nodes.
func (x *SubTreeReader) Read(buf []*grpcService.GetSubTreeResponse_Body) (int, error) {
for i := range len(buf) {
for i := 0; i < len(buf); i++ {
resp, err := x.cli.Recv()
if err == io.EOF {
return i, io.EOF
} else if err != nil {
return i, handleError("failed to get sub tree", err)
}
buf[i] = resp.GetBody()
buf[i] = resp.Body
}
return len(buf), nil
@ -460,7 +460,7 @@ func (x *SubTreeReader) ReadAll() ([]*grpcService.GetSubTreeResponse_Body, error
} else if err != nil {
return nil, handleError("failed to get sub tree", err)
}
res = append(res, resp.GetBody())
res = append(res, resp.Body)
}
return res, nil
@ -476,7 +476,7 @@ func (x *SubTreeReader) Next() (*grpcService.GetSubTreeResponse_Body, error) {
return nil, handleError("failed to get sub tree", err)
}
return resp.GetBody(), nil
return resp.Body, nil
}
// GetSubTree invokes eponymous method from TreeServiceClient.
@ -590,12 +590,12 @@ func (p *Pool) AddNodeByPath(ctx context.Context, prm AddNodeByPathParams) (uint
body := resp.GetBody()
if body == nil {
return 0, errors.New("nil body in tree service response")
} else if len(body.GetNodes()) == 0 {
} else if len(body.Nodes) == 0 {
return 0, errors.New("empty list of added nodes in tree service response")
}
// The first node is the leaf that we add, according to tree service docs.
return body.GetNodes()[0], nil
return body.Nodes[0], nil
}
// MoveNode invokes eponymous method from TreeServiceClient.