Compare commits

132 commits

Author SHA1 Message Date
10e63537b2 [#602] metrics: Rename blobovnicza size metric
`Size` is not the total size, but the size of the currently open DBs.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-17 19:17:35 +00:00
809e97626b [#602] blobovnicza: Fix size counter
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-17 19:17:35 +00:00
2e49d7ea7e [#602] blobovnicza: Init before using
Fix blobovnicza size: after a restart, the size metric resets.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-17 19:17:35 +00:00
f7042c5a6f [#609] Replace zaptest.NewLogger() with zap.L()
Semantic patch:
```
@@
@@
-import "go.uber.org/zap/zaptest"
+import "go.uber.org/zap"

-zaptest.NewLogger(t)
+zap.L()
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-17 16:51:10 +00:00
127c676786 [#607] *: Use keys.PublicKeys.Copy() where possible
Semantic patch:
```
@@
var dst identifier
var src identifier
var keys identifier
@@
 import keys "github.com/nspcc-dev/neo-go/pkg/crypto/keys"

-dst := make(keys.PublicKeys, len(src))
-copy(dst, src)
+dst := src.Copy()
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-17 16:50:02 +00:00
e604a3d749 [#607] *: Use zap.Stringer() where possible
Semantic patch:
```
@@
var f expression
var t expression
var a expression
@@
 f(
    ...,
-    zap.String(t, a.String()),
+    zap.Stringer(t, a),
    ...,
)
```
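
For context: `zap.String` evaluates `String()` eagerly, while `zap.Stringer` defers it until the entry is actually encoded, so disabled log levels skip the work. A minimal sketch (mine, with an illustrative `time.Duration` value):
```
package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	d := 1500 * time.Millisecond // time.Duration implements fmt.Stringer

	// Eager: d.String() runs even if the entry is later discarded.
	logger.Info("eager", zap.String("elapsed", d.String()))

	// Lazy: String() is called only when the entry is actually encoded.
	logger.Info("lazy", zap.Stringer("elapsed", d))
}
```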

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-17 16:50:02 +00:00
a8de37c8a2 [#607] *: Remove redundant if on error returns
Semantic patch:
```
@@
@@
-if err != nil { return err }
-return nil
+return err
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-17 16:50:02 +00:00
5a51b78946 [#620] object: Send status response for server-side streams
Previously, status responses were wrapped in a gRPC error and thus
couldn't be handled correctly on the client.

Introduced in c2617baf63, thanks @ale64bit for finding it.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-17 12:52:38 +00:00
6407bb5bd1 [#619] node: Fix object put when copies numbers contains only zeros
In this case, the object should be placed according to the replicas in the placement policy.

Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2023-08-17 11:31:34 +03:00
5335e7089e [#615] pilorama: Speedup TestForest_ApplyRandom()
Some of our pilorama tests fail on CI.
The reasons are not obvious, but one possible improvement
is using the `WithNoSync` option for them. It shouldn't have much effect,
because we are writing to tmpfs, but it doesn't hurt anyway.

If I replace `t.TempDir()` with a local directory, test execution time
goes down from 5s (sync) to 0.4s (nosync), which is the same time as
with `t.TempDir()`. Maybe we have some strange CI configuration.

```
panic: test timed out after 10m0s
running tests:
	TestForest_ApplyRandom (8m22s)
	TestForest_ApplyRandom/bbolt (8m21s)
...
goroutine 170 [syscall]:
syscall.Syscall(0xc000100000?, 0xc00047b758?, 0x6aff9a?, 0xc00041c1b0?)
	/opt/hostedtoolcache/go/1.20.7/x64/src/syscall/syscall_linux.go:69 +0x27
syscall.Fdatasync(0x9e35c0?)
	/opt/hostedtoolcache/go/1.20.7/x64/src/syscall/zsyscall_linux_amd64.go:418 +0x2a
go.etcd.io/bbolt.fdatasync(0xc000189000?)
```
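
A minimal sketch of what `WithNoSync` boils down to, assuming it maps to bbolt's `NoSync` knob (which the fdatasync frames above suggest):
```
package main

import bolt "go.etcd.io/bbolt"

func main() {
	db, err := bolt.Open("test.db", 0o600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Skip fdatasync on commit: fine for throwaway test data where
	// durability across a crash does not matter.
	db.NoSync = true
}
```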

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-16 19:09:08 +00:00
2efe9cc1be [#585] writecache: Fix DB counter
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-16 14:47:44 +03:00
58c8722c81 [#585] fstree: Add optional file counter
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-16 14:47:44 +03:00
baad49990c [#585] fstree: Return logical error if object deleted
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-16 14:47:44 +03:00
0c52186572 [#585] fstree: Remove unused method
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-16 14:47:44 +03:00
eec97d177e [#585] writecache: Count items periodically
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-16 14:47:44 +03:00
d15199c5d8 [#596] engine: Consider context errors as logical
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-16 10:39:41 +03:00
bc425b5bad [#608] pre-commit: Fix linter
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2023-08-15 07:42:46 +00:00
88b6755c5e [#598] Fix use-after-close bug in badger writecache
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-15 07:04:06 +00:00
ae8be495c8 [#xx] Avoid manual management of files in tests
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-14 14:01:39 +03:00
376f03a445 [#598] Hold mode mutex when setting mode
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-14 07:12:27 +00:00
ad87493c41 [#8] Bump required go version to go1.20
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2023-08-11 15:51:06 +03:00
21800e9fcc [#162] core: Move literals to constants
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2023-08-11 15:48:42 +03:00
a5f51add25 [#162] ci: Add noliteral linter
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2023-08-11 15:48:42 +03:00
abdb0910cc [#600] adm/tests: Add missing WaitGroup.Add()
```
panic: sync: negative WaitGroup counter
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-11 09:50:07 +00:00
4d2af137e9 [#500] .forgejo: Add DCO action
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-11 09:47:41 +00:00
b44a8dd46c [#597] *: Fix linter warnings
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-11 09:55:51 +03:00
20af34ecdb [#591] cli: fix SDK PrmContainerDelete usage for DeleteContainerPrm
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-10 16:40:35 +00:00
dd988a5912 [#592] adm: Allow to tick epoch concurrently
The epoch can be ticked by the IR in the background. Because the only
use case for this command is to apply some changes, the right behaviour
is to ignore the error, not to retry.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 16:40:00 +00:00
32a9f51586 [#562] logs: Remove superfluous comment
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 16:38:43 +00:00
e084c47bd6 [#248] innerring: Remove audit from tests
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 16:38:43 +00:00
779da6ec35 [#248] morph: Remove audit contract name
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 16:38:43 +00:00
2685b1d548 [#248] adm: Do not deploy audit contract
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 16:38:43 +00:00
55ce4dc075 [#248] morph: Remove obsolete network parameters
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 16:38:43 +00:00
c54fcb297d [#248] cli,node: Remove obsolete network parameters
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 16:38:43 +00:00
26a78aa366 [#248] adm: Remove obsolete network parameters
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 16:38:43 +00:00
4ad0ebb32f [#565] Add metrics for current GRPC endpoint status
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2023-08-10 16:37:49 +00:00
c3e23a1448 [#578] gendoc: Allow to override flags
The command is used in multiple places across the whole FrostFS
ecosystem. While we want to have uniform interfaces everywhere,
sometimes we can't: already defined global flags can be hard to change
because of our obligations to the users. The Cobra framework doesn't
allow conflicting flags (and we do have global ones), so we allow
overriding them.
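
A minimal sketch of the constraint, with a hypothetical `config` flag: pflag panics with "flag redefined" on a second registration of the same name, so overriding means mutating the existing flag rather than re-adding it:
```
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{Use: "app"}
	cmd.Flags().String("config", "", "path to config")

	// Registering "config" again would panic; look it up and change it.
	if f := cmd.Flags().Lookup("config"); f != nil {
		f.Usage = "path to config (overridden)"
		f.DefValue = "config.yml"
	}

	fmt.Println(cmd.Flags().Lookup("config").Usage)
}
```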

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 11:10:11 +00:00
6bcba27757 [#578] gendoc: Remove flag shorthands
Make it harder to run into conflicts with already existing commands.
Because the command is executed only once, I don't think usability
gets worse.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 11:10:11 +00:00
dca31e8888 [#578] gendoc: Add date to man pages
```
HISTORY
       8-Aug-2023 Auto generated by spf13/cobra
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 11:10:11 +00:00
b02a1a34c1 [#578] gendoc: Allow to customize man pages
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 11:10:11 +00:00
082370583f [#578] gendoc: Replace NSPCC with FrostFS
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 11:10:11 +00:00
34b5d90441 [#590] cli: fix SDK PrmContainerPut usage for PutContainerPrm
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-10 11:09:31 +00:00
6186329aec [#590] cli: fix SDK PrmContainerGet usage for GetContainerPrm
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-10 11:09:31 +00:00
c3c0574e3c [#589] cli: Add control shards evacuate command
It was accidentally removed in f7c0b50d70.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-10 09:06:06 +00:00
8f994163ee [#586] Fix writecache benchmarks and refactor hacky NeedsCompression
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-10 08:05:18 +00:00
023b90342c [#584] Disable compression in badger writecache
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-10 10:14:06 +03:00
d641cba2fc [#587] Do not use math/rand.Read
Fix staticcheck warnings after go1.20 update.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-09 16:02:44 +03:00
33c11be0cf [#587] .golangci.yml: Fix pre-commit warnings
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-09 16:02:44 +03:00
5b7e4a51b7 [#481] Update frostfs-sdk-go and error pointer receivers
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-09 10:26:53 +00:00
de3d1eb99c [#581] Bump required go version to go1.20
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-09 10:16:48 +00:00
ae322e9f73 [#576] Set SyncWrites for badger writecache by default
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-09 11:28:26 +03:00
8d589314b5 [#560] node: Fix Put in multi REP with intersecting sets of nodes
Once a node was processed, it was skipped on subsequent iterations. When
forming the result, if all nodes had been skipped (because they were
already processed for a previous REP), the service marked the whole
request as incomplete.

Examples of policies which are unblocked:
- REP 1 REP 1 CBF 1
- REP 4 IN X REP 4 IN Y
  CBF 4
  SELECT 2 FROM FX AS X SELECT 2 FROM FY AS Y
  FILTER Country EQ Russia OR Country EQ Sweden OR Country EQ Finland AS FY
  FILTER Price GE 0 AS FX

Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2023-08-08 10:22:53 +00:00
7da4306e38 [#575] writecache: Fix log level for badger writecache
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-08 08:16:36 +00:00
d3a52ec73a [#516] node: Send bootstrap request if attributes were updated
Consider following situation:
1. Epoch tick.
2. Update node attributes in the config.
3. Restart node.

Because we already sent a bootstrap query after (1) and we are still
online, a new bootstrap query won't be sent. This leads to the node
attributes being updated only after another epoch.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-08 07:40:26 +00:00
0e697266c3 [#563] writecache: Fix metrics and bolt delete
Estimate the cache size after deleting objects to update the metric.
Update the counters on small object deletion.
Do not count the bbolt DB file as an FSTree object.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-07 12:53:28 +00:00
5bbfebba2d [#570] Fix writecache type constant copy-pasta bug
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-07 12:06:44 +03:00
1a0cb0f34a [#421] Try using badger for the write-cache
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-07 08:16:57 +00:00
65c72f3e0b [#559] Remove manual path handling in fstree tests
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-03 10:03:41 +03:00
1e8b4b8a17 [#557] services: Regenerate stable marshalers
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-02 13:37:40 +00:00
435a581b5e [#557] go.mod: Update sdk-go and api-go
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-02 13:37:40 +00:00
93c46cfdf0 [#550] cli: make get-op-log meta pretty formatted
Close #550

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-02 10:27:15 +00:00
1b7b54ba89 [#540] cli: refactor client parameters usage
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-02 09:44:51 +00:00
b3695411d9 [#553] eacl: Fix bug with casting to ObjectAccessDenied error
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-02 07:22:48 +00:00
ec8a631d31 [#542] Update test for SizeInBytesSafe function
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2023-08-01 13:38:00 +00:00
9ca63ac8c3 [#542] Fix bug in SizeInBytesSafe function
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2023-08-01 13:38:00 +00:00
b8052c794e [#547] go.mod: Update api-go, sdk-go
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-31 15:35:15 +00:00
35dc64bd7b [#547] metabase: Fix datarace in tests
Quite an old one, introduced in bf9e938a3b.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-31 15:35:15 +00:00
05ac9e3637 [#547] objectsvc: Work with traversal struct from a single thread
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-31 15:35:15 +00:00
7b0fdf0202 [#533] services: Assume API supports status codes
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-31 15:35:15 +00:00
ec8b4fdc48 [#541] writecache/test: Close writecache on exit
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-29 10:57:25 +00:00
ad5f527bd3 [#541] writecache/test: Remove initWC()
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-29 10:57:25 +00:00
ea32913430 [#543] putsvc: Fix PutSingle implementation
Add Lock and Delete handlers to local PutSingle.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-28 12:09:41 +00:00
99bb488ebd [#539] getsvc: Write payload direct to out stream
To reduce memory allocations.
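
A minimal sketch of the pattern with hypothetical names (not the actual getsvc code):
```
package main

import (
	"io"
	"os"
	"strings"
)

func main() {
	payload := strings.NewReader("object payload")

	// io.Copy reuses one small scratch buffer instead of accumulating
	// the entire payload in memory first.
	if _, err := io.Copy(os.Stdout, payload); err != nil {
		panic(err)
	}
}
```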

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-27 17:02:08 +03:00
286242cad0 [#539] getsvc: Use buffer to assemble object
To reduce memory consumption.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-27 17:02:08 +03:00
32c77f3a23 [#537] node: Add runtime.memory_limit config parameter
This parameter allows setting a soft memory limit for the Go GC.
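
A minimal sketch of what such a parameter maps to in the runtime; the value here is illustrative:
```
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// A soft limit for the Go GC, the same mechanism as GOMEMLIMIT.
	debug.SetMemoryLimit(4 << 30) // 4 GiB

	// A negative input does not adjust the limit, it only reads it back.
	fmt.Println(debug.SetMemoryLimit(-1))
}
```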

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-25 17:27:09 +03:00
5ff82ff04f [#6] services/object: Simplify local/remote targets
We do not use the result returned from Close(), and we always execute both
methods in succession. It makes sense to unite them.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-21 18:39:12 +03:00
448b48287c [#6] services/util: Do not panic in sign function
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-21 18:39:12 +03:00
c2617baf63 [#6] services/util: Remove remaining stream wrappers
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-21 18:39:09 +03:00
372160d048 [#6] services/util: Remove SignService.HandleUnaryRequest
There is no need for a wrapper with many from-`interface{}` conversions.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-21 18:36:08 +03:00
fef172c5b0 [#6] services/util: Simplify response.Service
It has only 1 parameter, which is obligatory.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-21 18:29:57 +03:00
5a4054eeb6 [#535] .forgejo: Add vulncheck workflow
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-20 09:52:03 +00:00
eed594431f [#335] treesvc: Add GetSubTree ordering unit test
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-20 10:14:10 +03:00
af82c2865e [#335] treesvc: Fix inmemory unit tests and nil meta items
The bolt forest saves an empty slice of items. Now the in-memory forest
does the same.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-20 10:14:10 +03:00
b4e72a2dfd [#335] treesvc: Sort nodes by Filename in GetSubTree
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-20 10:14:10 +03:00
94df541426 [#530] go.mod: Update frostfs-sdk-go and frostfs-api-go versions
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-07-19 16:15:21 +03:00
57e7fb5ccf [#521] cli: Add common aliases to policy playground commands
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-07-19 11:34:08 +00:00
24dffdac6f [#521] cli: Add netmap load command to policy playground
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-07-19 11:34:08 +00:00
a9d04ba86f [#244] Remove --local-dump from frostfs-adm config
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-07-18 13:51:25 +00:00
6429975584 [#511] docs: Remove GitHub mentions from CONTRIBUTING.md
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-18 13:24:02 +00:00
4680087711 [#527] Add support for select-filter expressions in policy playground
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-07-18 13:15:58 +00:00
f0355a453e [#463] policer: Remove capacity rebalance logic
The current implementation has some quirks. For example,
using only half of the object.put.pool_size_remote threads
tells the replicator that the node is 50% loaded,
but in reality we could be putting lots of big objects.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-18 10:52:12 +00:00
8b78db74bc [#463] replicator: Add tracing span
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-18 10:52:12 +00:00
8966dd8e35 [#463] putsvc: Use PutSingle RPC for remote target
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-18 10:52:12 +00:00
b2487e8cc5 [#516] node: Do not bootstrap if node is online candidate
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-18 10:51:02 +00:00
3b66f98f27 [#519] Use Address.Equals in policer tests
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-07-17 10:45:07 +00:00
3e8de14e7d [#382] evacuate: Fix unit tests
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-17 10:24:05 +00:00
a0c7045f29 [#512] cli: Refactor UX for bearer create command
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-17 11:45:27 +03:00
d8e37a827f [#497] config: Add examples and unit tests
Add examples and unit tests for tree.authorized_keys section.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-17 08:23:28 +00:00
486287c2f7 [#524] cli: Add impersonate flag for bearer token creation
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-17 08:20:50 +00:00
397131b0ea [#520] objectcore: Refactor format validator
Remove redundant FIXME.
Move error to consts.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-17 07:07:21 +00:00
11027945d8 [#479] writecache: Fix writecache fstree flush premature ctx cancel
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-07-14 10:25:52 +03:00
8a9fc2c372 [#510] treesvc: Rename tableFromBearer to useBearer
With impersonation, the old name is no longer descriptive.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-12 10:23:21 +00:00
b8bcfac531 [#510] treesvc: Fix panic in bearer token processing
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-12 10:23:21 +00:00
24eb988897 [#294] deletesvc: Drop cast
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-12 07:42:10 +00:00
c83e7c875f [#294] searchsvcv2: Refactor service constructor
Pass required deps as args.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-12 07:42:10 +00:00
e8091101c7 [#294] searchsvc: Refactor service constructor
Pass required deps as args.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-12 07:42:10 +00:00
ec9b738465 [#294] putsvcv2: Refactor service constructor
Pass required deps as args.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-12 07:42:10 +00:00
800a685e84 [#294] putsvc: Refactor service constructor
Pass required deps as args.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-12 07:42:10 +00:00
1420b8b9ea [#294] deletesvc: Refactor service constructor
Pass required deps as args.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-12 07:42:10 +00:00
a476d8285a [#294] deletesvcv2: Refactor service constructor
Pass required deps as args.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-12 07:42:10 +00:00
70a1081988 [#294] aclsvcv2: Refactor service constructor
Pass required deps as args.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-12 07:42:10 +00:00
18d8898b00 [#294] aclsvc: Refactor service constructor
Pass required deps as args.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-12 07:42:10 +00:00
61541eaec2 [#294] aclsvc: Refactor checker constructor
Pass required deps as args.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-12 07:42:10 +00:00
7da284f3e8 [#509] go.mod: Update sdk-go
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-11 17:38:00 +03:00
80481c015c [#509] putsvc: Omit AccessIdentifiers from iteratePlacement()
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-11 17:38:00 +03:00
0754e6e654 [#390] cli: Fix bearer token reading for tree subcommands
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-07-11 14:11:30 +00:00
676a3efa79 [#390] cli: Make tree commands errors and messages more verbose
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-07-11 14:11:30 +00:00
c42db4e761 [#390] cli: Support more tree service API calls in CLI
* Support healthcheck API call
* Support move API call
* Support remove API call
* Support getSubtree API call
* Support getOpLog API call

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-07-11 14:11:30 +00:00
a65e26878b [#486] put service: Fix error typo
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-10 15:49:21 +03:00
fcbf90d31b [#486] node: Add PutSingle implemetation
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-10 15:49:21 +03:00
7b76527759 [#486] node: Add PutSingle wrappers
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-10 15:49:21 +03:00
9be5d44a46 [#486] node: Update api-go and sdk-go versions
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-10 15:49:21 +03:00
040a623d39 [#476] cli: Fix object nodes command
Do not fail if client creation failed.
Use external addresses to create the client too.
Use public key as node ID.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-07-10 09:58:31 +03:00
140d970a95 [#498] policer: Fix objectSDK import
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-07 18:52:40 +03:00
2310a5c7ba [#498] policer: Allow to set sleep duration between iterations
Speed up tests on CI.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-07 14:52:33 +00:00
e858479a74 [#498] policer: Explicitly Rewind() iterator after finish
Previously, we could continue to return `EndOfListing` infinitely.
Reflect iterator reuse via the Rewind() method.
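
A minimal sketch of the reuse contract, with hypothetical identifiers rather than the actual engine API:
```
package policer

// cursor stands in for the object listing iterator.
type cursor interface {
	Next(batch int) ([]string, error) // returns an end-of-listing error when done
	Rewind()                          // must be called before reusing the cursor
}

func fullPass(c cursor) {
	for {
		if _, err := c.Next(100); err != nil {
			// Without Rewind(), every later Next() would keep
			// returning the end-of-listing error forever.
			c.Rewind()
			return
		}
	}
}
```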

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-07 14:52:33 +00:00
a0d51090a4 [#482] Enable staticcheck in forgejo actions
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2023-07-07 10:12:49 +03:00
033eaf77e1 [#496] node: Fix linter importas
Standardize the alias of the
frostfs-sdk-go/object import as objectSDK.
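
The standardized form, as it also appears in the diffs below:
```
package example

import objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"

var _ *objectSDK.Object // reference the aliased import
```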

Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2023-07-06 15:36:41 +03:00
4bbe9cc936 [#496] .golangci.yml: Add importas linter
Add importas linter

Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2023-07-06 15:35:23 +03:00
6eefe9747e treesvc: Do not provide credentials unless TLS is used
4f413fe86e was perfect, except it did the opposite of what we needed.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-07-06 08:04:28 +00:00
f354b8a270 [#473] Add tests for rem values of traverser
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-07-05 20:13:23 +03:00
c6df6c84ae [#473] Do not modify traverse plan when [0] copies number vector is provided
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-07-05 19:54:43 +03:00
371 changed files with 6160 additions and 3483 deletions

View file

@@ -1,4 +1,4 @@
-FROM golang:1.20 as builder
+FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository

View file

@@ -1,4 +1,4 @@
-FROM golang:1.20
+FROM golang:1.21
WORKDIR /tmp

View file

@@ -1,4 +1,4 @@
-FROM golang:1.20 as builder
+FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository

View file

@@ -1,4 +1,4 @@
-FROM golang:1.20 as builder
+FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository

View file

@@ -1,4 +1,4 @@
-FROM golang:1.20 as builder
+FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository

View file

@@ -8,7 +8,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
-go_versions: [ '1.19', '1.20' ]
+go_versions: [ '1.20', '1.21' ]
steps:
- uses: actions/checkout@v3

View file

@@ -0,0 +1,21 @@
name: DCO action
on: [pull_request]
jobs:
dco:
name: DCO
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.21'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v2
with:
from: 'origin/${{ github.event.pull_request.base.ref }}'

View file

@@ -11,20 +11,21 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
-go-version: '1.20'
+go-version: '1.21'
cache: true
-- name: golangci-lint
-uses: https://github.com/golangci/golangci-lint-action@v3
-with:
-version: latest
+- name: Install linters
+run: make lint-install
+- name: Run linters
+run: make lint
tests:
name: Tests
runs-on: ubuntu-latest
strategy:
matrix:
-go_versions: [ '1.19', '1.20' ]
+go_versions: [ '1.20', '1.21' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@@ -47,8 +48,26 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
-go-version: '1.20'
+go-version: '1.21'
cache: true
- name: Run tests
run: go test ./... -count=1 -race
+staticcheck:
+name: Staticcheck
+runs-on: ubuntu-latest
+steps:
+- uses: actions/checkout@v3
+- name: Set up Go
+uses: actions/setup-go@v3
+with:
+go-version: '1.21'
+cache: true
+- name: Install staticcheck
+run: make staticcheck-install
+- name: Run staticcheck
+run: make staticcheck-run

View file

@@ -0,0 +1,22 @@
name: Vulncheck
on: [pull_request]
jobs:
vulncheck:
name: Vulncheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.21'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Run govulncheck
run: govulncheck ./...

View file

@@ -31,6 +31,20 @@ linters-settings:
statements: 60 # default 40
gocognit:
min-complexity: 40 # default 30
+importas:
+no-unaliased: true
+no-extra-aliases: false
+alias:
+pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
+alias: objectSDK
+custom:
+noliteral:
+path: bin/external_linters.so
+original-url: git.frostfs.info/TrueCloudLab/linters.git
+settings:
+target-methods : ["reportFlushError", "reportError"]
+disable-packages: ["codes", "err", "res","exec"]
+constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
linters:
enable:
@@ -62,5 +76,7 @@ linters:
- funlen
- gocognit
- contextcheck
+- importas
+- noliteral
disable-all: true
fast: false

View file

@@ -30,10 +30,13 @@ repos:
hooks:
- id: shellcheck
-- repo: https://github.com/golangci/golangci-lint
-rev: v1.52.2
+- repo: local
hooks:
-- id: golangci-lint
+- id: make-lint
+name: Run Make Lint
+entry: make lint
+language: system
+pass_filenames: false
- repo: local
hooks:

View file

@@ -36,7 +36,7 @@ Changelog for FrostFS Node
- `github.com/multiformats/go-multiaddr` to `v0.9.0`
- `github.com/hashicorp/golang-lru/v2` to `v2.0.2`
- `go.uber.org/atomic` to `v1.11.0`
-- Minimum go version to v1.19
+- Minimum go version to v1.20
- `github.com/prometheus/client_golang` to `v1.15.1`
- `github.com/prometheus/client_model` to `v0.4.0`
- `go.opentelemetry.io/otel` to `v1.15.1`

View file

@@ -3,8 +3,8 @@
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
-- Check the open [issues](https://github.com/TrueCloudLab/frostfs-node/issues) and
-[pull requests](https://github.com/TrueCloudLab/frostfs-node/pulls) for existing
+- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-node/issues) and
+[pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-node/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
@@ -27,19 +27,19 @@ Start by forking the `frostfs-node` repository, make changes in a branch and the
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:
-### Set up your GitHub Repository
-Fork [FrostFS node upstream](https://github.com/TrueCloudLab/frostfs-node/fork) source
+### Set up your Forgejo repository
+Fork [FrostFS node upstream](https://git.frostfs.info/TrueCloudLab/frostfs-node) source
repository to your own personal repository. Copy the URL of your fork (you will
need it for the `git clone` command below).
```sh
-$ git clone https://github.com/TrueCloudLab/frostfs-node
+$ git clone https://git.frostfs.info/TrueCloudLab/frostfs-node
```
### Set up git remote as ``upstream``
```sh
$ cd frostfs-node
-$ git remote add upstream https://github.com/TrueCloudLab/frostfs-node
+$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-node
$ git fetch upstream
$ git merge upstream/master
...
@@ -58,7 +58,7 @@ $ git checkout -b feature/123-something_awesome
After your code changes, make sure
- To add test cases for the new code.
-- To run `make lint`
+- To run `make lint` and `make staticcheck-run`
- To squash your commits into a single commit or a series of logically separated
commits run `git rebase -i`. It's okay to force update your pull request.
- To run `make test` and `make all` completes.
@@ -89,8 +89,8 @@ $ git push origin feature/123-something_awesome
```
### Create a Pull Request
-Pull requests can be created via GitHub. Refer to [this
-document](https://help.github.com/articles/creating-a-pull-request/) for
+Pull requests can be created via Forgejo. Refer to [this
+document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.

View file

@@ -7,8 +7,8 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
HUB_IMAGE ?= truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
-GO_VERSION ?= 1.19
-LINT_VERSION ?= 1.52.2
+GO_VERSION ?= 1.21
+LINT_VERSION ?= 1.54.0
ARCH = amd64
BIN = bin
@@ -25,6 +25,10 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
sed "s/-/~/")-${OS_RELEASE}
OUTPUT_LINT_DIR ?= $(shell pwd)/bin
+LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)
+TMP_DIR := .cache
.PHONY: help all images dep clean fmts fmt imports test lint docker/lint
prepare-release debpackage pre-commit unpre-commit
@@ -131,12 +135,30 @@ test:
pre-commit-run:
@pre-commit run -a --hook-stage manual
+# Install linters
+lint-install:
+@mkdir -p $(TMP_DIR)
+@rm -rf $(TMP_DIR)/linters
+@git clone --depth 1 https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
+@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
+@rm -rf $(TMP_DIR)/linters
+@rmdir $(TMP_DIR) 2>/dev/null || true
+@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:
-@golangci-lint --timeout=5m run
+@if [ ! -d "$(LINT_DIR)" ]; then \
+echo "Run make lint-install"; \
+exit 1; \
+fi
+$(LINT_DIR)/golangci-lint run
+# Install staticcheck
+staticcheck-install:
+@go install honnef.co/go/tools/cmd/staticcheck@latest
# Run staticcheck
-staticcheck:
+staticcheck-run:
@staticcheck ./...
# Run linters in Docker

View file

@@ -49,7 +49,7 @@ The latest version of frostfs-node works with frostfs-contract
# Building
-To make all binaries you need Go 1.19+ and `make`:
+To make all binaries you need Go 1.20+ and `make`:
```
make all
```

View file

@@ -36,9 +36,7 @@ alphabet-wallets: /path # path to consensus node / alphabet wallets s
network:
max_object_size: 67108864 # max size of a single FrostFS object, bytes
epoch_duration: 240 # duration of a FrostFS epoch in blocks, consider block generation frequency in the sidechain
-basic_income_rate: 0 # basic income rate, for private consider 0
fee:
-audit: 0 # network audit fee, for private installation consider 0
candidate: 0 # inner ring candidate registration fee, for private installation consider 0
container: 0 # container creation fee, for private installation consider 0
container_alias: 0 # container nice-name registration fee, for private installation consider 0

View file

@@ -34,9 +34,7 @@ alphabet-wallets: /home/user/deploy/alphabet-wallets
network:
max_object_size: 67108864
epoch_duration: 240
-basic_income_rate: 0
fee:
-audit: 0
candidate: 0
container: 0
withdraw: 0
@@ -142,7 +140,6 @@ Waiting for transactions to persist...
Stage 7: set addresses in NNS.
Waiting for transactions to persist...
NNS: Set alphabet0.frostfs -> f692dfb4d43a15b464eb51a7041160fb29c44b6a
-NNS: Set audit.frostfs -> 7df847b993affb3852074345a7c2bd622171ee0d
NNS: Set balance.frostfs -> 103519b3067a66307080a66570c0491ee8f68879
NNS: Set container.frostfs -> cae60bdd689d185901e495352d0247752ce50846
NNS: Set frostfsid.frostfs -> c421fb60a3895865a8f24d197d6a80ef686041d2

View file

@@ -18,8 +18,6 @@ type configTemplate struct {
AlphabetDir string
MaxObjectSize int
EpochDuration int
-BasicIncomeRate int
-AuditFee int
CandidateFee int
ContainerFee int
ContainerAliasFee int
@@ -33,10 +31,8 @@ alphabet-wallets: {{ .AlphabetDir}}
network:
max_object_size: {{ .MaxObjectSize}}
epoch_duration: {{ .EpochDuration}}
-basic_income_rate: {{ .BasicIncomeRate}}
homomorphic_hash_disabled: {{ .HomomorphicHashDisabled}}
fee:
-audit: {{ .AuditFee}}
candidate: {{ .CandidateFee}}
container: {{ .ContainerFee}}
container_alias: {{ .ContainerAliasFee }}
@@ -111,9 +107,7 @@ func generateConfigExample(appDir string, credSize int) (string, error) {
Endpoint: "https://neo.rpc.node:30333",
MaxObjectSize: 67108864, // 64 MiB
EpochDuration: 240, // 1 hour with 15s per block
-BasicIncomeRate: 1_0000_0000, // 0.0001 GAS per GiB (Fixed12)
HomomorphicHashDisabled: false, // object homomorphic hash is enabled
-AuditFee: 1_0000, // 0.00000001 GAS per audit (Fixed12)
CandidateFee: 100_0000_0000, // 100.0 GAS (Fixed8)
ContainerFee: 1000, // 0.000000001 * 7 GAS per container (Fixed12)
ContainerAliasFee: 500, // ContainerFee / 2

View file

@@ -28,8 +28,6 @@ func TestGenerateConfigExample(t *testing.T) {
require.Equal(t, filepath.Join(appDir, "alphabet-wallets"), v.GetString("alphabet-wallets"))
require.Equal(t, 67108864, v.GetInt("network.max_object_size"))
require.Equal(t, 240, v.GetInt("network.epoch_duration"))
-require.Equal(t, 100000000, v.GetInt("network.basic_income_rate"))
-require.Equal(t, 10000, v.GetInt("network.fee.audit"))
require.Equal(t, 10000000000, v.GetInt("network.fee.candidate"))
require.Equal(t, 1000, v.GetInt("network.fee.container"))
require.Equal(t, 100000000, v.GetInt("network.fee.withdraw"))

View file

@@ -54,8 +54,7 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
}
for k, v := range m {
switch k {
-case netmap.AuditFeeConfig, netmap.BasicIncomeRateConfig,
-netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
+case netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
nbuf := make([]byte, 8)
@@ -134,8 +133,7 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error)
valRaw := v
switch key {
-case netmap.AuditFeeConfig, netmap.BasicIncomeRateConfig,
-netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
+case netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
val, err = strconv.ParseInt(valRaw, 10, 64)

View file

@@ -3,6 +3,7 @@ package morph
import (
"errors"
"fmt"
+"strings"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
@@ -38,7 +39,14 @@ func forceNewEpochCmd(cmd *cobra.Command, _ []string) error {
return err
}
-return wCtx.awaitTx()
+if err := wCtx.awaitTx(); err != nil {
+if strings.Contains(err.Error(), "invalid epoch") {
+cmd.Println("Epoch has already ticked.")
+return nil
+}
+return err
+}
+return nil
}
func emitNewEpochCall(bw *io.BufBinWriter, wCtx *initializeContext, nmHash util.Uint160) error {

View file

@@ -104,8 +104,7 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
bftCount := smartcontract.GetDefaultHonestNodeCount(size)
for i := range wallets {
i := i
-ps := make(keys.PublicKeys, len(pubs))
-copy(ps, pubs)
+ps := pubs.Copy()
errG.Go(func() error {
if err := addMultisigAccount(wallets[i], majCount, committeeAccountName, passwords[i], ps); err != nil {
return fmt.Errorf("can't create committee account: %w", err)

View file

@@ -75,6 +75,7 @@ func TestGenerateAlphabet(t *testing.T) {
var wg sync.WaitGroup
for i := uint64(0); i < size; i++ {
i := i
+wg.Add(1)
go func() {
defer wg.Done()
p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")

View file

@@ -210,11 +210,11 @@ func validateInit(cmd *cobra.Command) error {
func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet) (Client, error) {
var c Client
var err error
-if v.GetString(localDumpFlag) != "" {
-if v.GetString(endpointFlag) != "" {
+if ldf := cmd.Flags().Lookup(localDumpFlag); ldf != nil && ldf.Changed {
+if cmd.Flags().Changed(endpointFlag) {
return nil, fmt.Errorf("`%s` and `%s` flags are mutually exclusive", endpointFlag, localDumpFlag)
}
-c, err = newLocalClient(cmd, v, wallets)
+c, err = newLocalClient(cmd, v, wallets, ldf.Value.String())
} else {
c, err = getN3Client(v)
}

View file

@@ -41,7 +41,6 @@ const (
frostfsContract = "frostfs" // not deployed in side-chain.
processingContract = "processing" // not deployed in side-chain.
alphabetContract = "alphabet"
-auditContract = "audit"
balanceContract = "balance"
containerContract = "container"
frostfsIDContract = "frostfsid"
@@ -51,7 +50,6 @@
var (
contractList = []string{
-auditContract,
balanceContract,
containerContract,
frostfsIDContract,
@@ -69,10 +67,8 @@
netmapConfigKeys = []string{
netmap.EpochDurationConfig,
netmap.MaxObjectSizeConfig,
-netmap.AuditFeeConfig,
netmap.ContainerFeeConfig,
netmap.ContainerAliasFeeConfig,
-netmap.BasicIncomeRateConfig,
netmap.IrCandidateFeeConfig,
netmap.WithdrawFeeConfig,
netmap.HomomorphicHashingDisabledKey,
@@ -533,8 +529,6 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an
case processingContract:
items = append(items, c.Contracts[frostfsContract].Hash)
return items[1:] // no notary info
-case auditContract:
-items = append(items, c.Contracts[netmapContract].Hash)
case balanceContract:
items = append(items,
c.Contracts[netmapContract].Hash,

View file

@@ -51,7 +51,7 @@ type localClient struct {
maxGasInvoke int64
}
-func newLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet) (*localClient, error) {
+func newLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet, dumpPath string) (*localClient, error) {
cfg, err := config.LoadFile(v.GetString(protoConfigPath))
if err != nil {
return nil, err
@@ -87,7 +87,6 @@ func newLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
go bc.Run()
-dumpPath := v.GetString(localDumpFlag)
if cmd.Name() != "init" {
f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0600)
if err != nil {

View file

@@ -12,10 +12,8 @@ func getDefaultNetmapContractConfigMap() map[string]any {
m := make(map[string]any)
m[netmap.EpochDurationConfig] = viper.GetInt64(epochDurationInitFlag)
m[netmap.MaxObjectSizeConfig] = viper.GetInt64(maxObjectSizeInitFlag)
-m[netmap.AuditFeeConfig] = viper.GetInt64(auditFeeInitFlag)
m[netmap.ContainerFeeConfig] = viper.GetInt64(containerFeeInitFlag)
m[netmap.ContainerAliasFeeConfig] = viper.GetInt64(containerAliasFeeInitFlag)
-m[netmap.BasicIncomeRateConfig] = viper.GetInt64(incomeRateInitFlag)
m[netmap.IrCandidateFeeConfig] = viper.GetInt64(candidateFeeInitFlag)
m[netmap.WithdrawFeeConfig] = viper.GetInt64(withdrawFeeInitFlag)
m[netmap.HomomorphicHashingDisabledKey] = viper.GetBool(homomorphicHashDisabledInitFlag)

View file

@@ -18,10 +18,6 @@ const (
maxObjectSizeCLIFlag = "max-object-size"
epochDurationInitFlag = "network.epoch_duration"
epochDurationCLIFlag = "epoch-duration"
-incomeRateInitFlag = "network.basic_income_rate"
-incomeRateCLIFlag = "basic-income-rate"
-auditFeeInitFlag = "network.fee.audit"
-auditFeeCLIFlag = "audit-fee"
containerFeeInitFlag = "network.fee.container"
containerAliasFeeInitFlag = "network.fee.container_alias"
containerFeeCLIFlag = "container-fee"
@@ -69,15 +65,12 @@
_ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
_ = viper.BindPFlag(epochDurationInitFlag, cmd.Flags().Lookup(epochDurationCLIFlag))
_ = viper.BindPFlag(maxObjectSizeInitFlag, cmd.Flags().Lookup(maxObjectSizeCLIFlag))
-_ = viper.BindPFlag(incomeRateInitFlag, cmd.Flags().Lookup(incomeRateCLIFlag))
_ = viper.BindPFlag(homomorphicHashDisabledInitFlag, cmd.Flags().Lookup(homomorphicHashDisabledCLIFlag))
-_ = viper.BindPFlag(auditFeeInitFlag, cmd.Flags().Lookup(auditFeeCLIFlag))
_ = viper.BindPFlag(candidateFeeInitFlag, cmd.Flags().Lookup(candidateFeeCLIFlag))
_ = viper.BindPFlag(containerFeeInitFlag, cmd.Flags().Lookup(containerFeeCLIFlag))
_ = viper.BindPFlag(containerAliasFeeInitFlag, cmd.Flags().Lookup(containerAliasFeeCLIFlag))
_ = viper.BindPFlag(withdrawFeeInitFlag, cmd.Flags().Lookup(withdrawFeeCLIFlag))
_ = viper.BindPFlag(protoConfigPath, cmd.Flags().Lookup(protoConfigPath))
-_ = viper.BindPFlag(localDumpFlag, cmd.Flags().Lookup(localDumpFlag))
},
RunE: initializeSideChainCmd,
}

View file

@@ -45,7 +45,7 @@ func init() {
rootCmd.AddCommand(storagecfg.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
-rootCmd.AddCommand(gendoc.Command(rootCmd))
+rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))
}
func Execute() error {

View file

@@ -13,7 +13,7 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
)
@@ -70,8 +70,8 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
// PutContainerPrm groups parameters of PutContainer operation.
type PutContainerPrm struct {
commonPrm
client.PrmContainerPut
Client *client.Client
ClientParams client.PrmContainerPut
}
// PutContainerRes groups the resulting values of PutContainer operation.
@@ -93,7 +93,7 @@ func (x PutContainerRes) ID() cid.ID {
//
// Returns any error which prevented the operation from completing correctly in error return.
func PutContainer(ctx context.Context, prm PutContainerPrm) (res PutContainerRes, err error) {
cliRes, err := prm.cli.ContainerPut(ctx, prm.PrmContainerPut)
cliRes, err := prm.Client.ContainerPut(ctx, prm.ClientParams)
if err == nil {
res.cnr = cliRes.ID()
}
@@ -103,13 +103,15 @@ func PutContainer(ctx context.Context, prm PutContainerPrm) (res PutContainerRes
// GetContainerPrm groups parameters of GetContainer operation.
type GetContainerPrm struct {
commonPrm
cliPrm client.PrmContainerGet
Client *client.Client
ClientParams client.PrmContainerGet
}
// SetContainer sets identifier of the container to be read.
//
// Deprecated: Use GetContainerPrm.ClientParams.ContainerID instead.
func (x *GetContainerPrm) SetContainer(id cid.ID) {
x.cliPrm.SetContainer(id)
x.ClientParams.ContainerID = &id
}
// GetContainerRes groups the resulting values of GetContainer operation.
@@ -126,7 +128,7 @@ func (x GetContainerRes) Container() containerSDK.Container {
//
// Returns any error which prevented the operation from completing correctly in error return.
func GetContainer(ctx context.Context, prm GetContainerPrm) (res GetContainerRes, err error) {
res.cliRes, err = prm.cli.ContainerGet(ctx, prm.cliPrm)
res.cliRes, err = prm.Client.ContainerGet(ctx, prm.ClientParams)
return
}
@@ -134,9 +136,12 @@ func GetContainer(ctx context.Context, prm GetContainerPrm) (res GetContainerRes
// IsACLExtendable checks if ACL of the container referenced by the given identifier
// can be extended. Client connection MUST BE correctly established in advance.
func IsACLExtendable(ctx context.Context, c *client.Client, cnr cid.ID) (bool, error) {
var prm GetContainerPrm
prm.SetClient(c)
prm.SetContainer(cnr)
prm := GetContainerPrm{
Client: c,
ClientParams: client.PrmContainerGet{
ContainerID: &cnr,
},
}
res, err := GetContainer(ctx, prm)
if err != nil {
@@ -148,8 +153,8 @@ func IsACLExtendable(ctx context.Context, c *client.Client, cnr cid.ID) (bool, e
// DeleteContainerPrm groups parameters of DeleteContainerPrm operation.
type DeleteContainerPrm struct {
commonPrm
client.PrmContainerDelete
Client *client.Client
ClientParams client.PrmContainerDelete
}
// DeleteContainerRes groups the resulting values of DeleteContainer operation.
@@ -164,7 +169,7 @@ type DeleteContainerRes struct{}
//
// Returns any error which prevented the operation from completing correctly in error return.
func DeleteContainer(ctx context.Context, prm DeleteContainerPrm) (res DeleteContainerRes, err error) {
_, err = prm.cli.ContainerDelete(ctx, prm.PrmContainerDelete)
_, err = prm.Client.ContainerDelete(ctx, prm.ClientParams)
return
}
@@ -331,17 +336,17 @@ type PutObjectPrm struct {
copyNum []uint32
hdr *object.Object
hdr *objectSDK.Object
rdr io.Reader
headerCallback func(*object.Object)
headerCallback func(*objectSDK.Object)
prepareLocally bool
}
// SetHeader sets object header.
func (x *PutObjectPrm) SetHeader(hdr *object.Object) {
func (x *PutObjectPrm) SetHeader(hdr *objectSDK.Object) {
x.hdr = hdr
}
@@ -352,7 +357,7 @@ func (x *PutObjectPrm) SetPayloadReader(rdr io.Reader) {
// SetHeaderCallback sets callback which is called on the object after the header is received
// but before the payload is written.
func (x *PutObjectPrm) SetHeaderCallback(f func(*object.Object)) {
func (x *PutObjectPrm) SetHeaderCallback(f func(*objectSDK.Object)) {
x.headerCallback = f
}
@@ -532,22 +537,22 @@ type GetObjectPrm struct {
objectAddressPrm
rawPrm
payloadWriterPrm
headerCallback func(*object.Object)
headerCallback func(*objectSDK.Object)
}
// SetHeaderCallback sets callback which is called on the object after the header is received
// but before the payload is written.
func (p *GetObjectPrm) SetHeaderCallback(f func(*object.Object)) {
func (p *GetObjectPrm) SetHeaderCallback(f func(*objectSDK.Object)) {
p.headerCallback = f
}
// GetObjectRes groups the resulting values of GetObject operation.
type GetObjectRes struct {
hdr *object.Object
hdr *objectSDK.Object
}
// Header returns the header of the request object.
func (x GetObjectRes) Header() *object.Object {
func (x GetObjectRes) Header() *objectSDK.Object {
return x.hdr
}
@@ -585,7 +590,7 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
return nil, fmt.Errorf("init object reading on client: %w", err)
}
var hdr object.Object
var hdr objectSDK.Object
if !rdr.ReadHeader(&hdr) {
_, err = rdr.Close()
@@ -621,11 +626,11 @@ func (x *HeadObjectPrm) SetMainOnlyFlag(v bool) {
// HeadObjectRes groups the resulting values of HeadObject operation.
type HeadObjectRes struct {
hdr *object.Object
hdr *objectSDK.Object
}
// Header returns the requested object header.
func (x HeadObjectRes) Header() *object.Object {
func (x HeadObjectRes) Header() *objectSDK.Object {
return x.hdr
}
@@ -661,7 +666,7 @@ func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error)
return nil, fmt.Errorf("read object header via client: %w", err)
}
var hdr object.Object
var hdr objectSDK.Object
if !res.ReadHeader(&hdr) {
return nil, fmt.Errorf("missing header in response")
@@ -677,11 +682,11 @@ type SearchObjectsPrm struct {
commonObjectPrm
containerIDPrm
filters object.SearchFilters
filters objectSDK.SearchFilters
}
// SetFilters sets search filters.
func (x *SearchObjectsPrm) SetFilters(filters object.SearchFilters) {
func (x *SearchObjectsPrm) SetFilters(filters objectSDK.SearchFilters) {
x.filters = filters
}
@@ -754,7 +759,7 @@ type HashPayloadRangesPrm struct {
tz bool
rngs []*object.Range
rngs []*objectSDK.Range
salt []byte
}
@@ -765,7 +770,7 @@ func (x *HashPayloadRangesPrm) TZ() {
}
// SetRanges sets a list of payload ranges to hash.
func (x *HashPayloadRangesPrm) SetRanges(rngs []*object.Range) {
func (x *HashPayloadRangesPrm) SetRanges(rngs []*objectSDK.Range) {
x.rngs = rngs
}
@@ -839,11 +844,11 @@ type PayloadRangePrm struct {
rawPrm
payloadWriterPrm
rng *object.Range
rng *objectSDK.Range
}
// SetRange sets payload range to read.
func (x *PayloadRangePrm) SetRange(rng *object.Range) {
func (x *PayloadRangePrm) SetRange(rng *objectSDK.Range) {
x.rng = rng
}

View file

@@ -24,6 +24,7 @@ const (
ownerFlag = "owner"
outFlag = "out"
jsonFlag = commonflags.JSON
impersonateFlag = "impersonate"
)
var createCmd = &cobra.Command{
@@ -39,19 +40,20 @@ is set to current epoch + n.
}
func init() {
createCmd.Flags().StringP(eaclFlag, "e", "", "Path to the extended ACL table")
createCmd.Flags().StringP(issuedAtFlag, "i", "", "Epoch to issue token at")
createCmd.Flags().StringP(notValidBeforeFlag, "n", "", "Not valid before epoch")
createCmd.Flags().StringP(eaclFlag, "e", "", "Path to the extended ACL table (mutually exclusive with --impersonate flag)")
createCmd.Flags().StringP(issuedAtFlag, "i", "+0", "Epoch to issue token at")
createCmd.Flags().StringP(notValidBeforeFlag, "n", "+0", "Not valid before epoch")
createCmd.Flags().StringP(commonflags.ExpireAt, "x", "", "The last active epoch for the token")
createCmd.Flags().StringP(ownerFlag, "o", "", "Token owner")
createCmd.Flags().String(outFlag, "", "File to write token to")
createCmd.Flags().Bool(jsonFlag, false, "Output token in JSON")
createCmd.Flags().Bool(impersonateFlag, false, "Mark token as impersonate to consider the token signer as the request owner (mutually exclusive with --eacl flag)")
createCmd.Flags().StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage)
createCmd.MarkFlagsMutuallyExclusive(eaclFlag, impersonateFlag)
_ = cobra.MarkFlagFilename(createCmd.Flags(), eaclFlag)
_ = cobra.MarkFlagRequired(createCmd.Flags(), issuedAtFlag)
_ = cobra.MarkFlagRequired(createCmd.Flags(), notValidBeforeFlag)
_ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.ExpireAt)
_ = cobra.MarkFlagRequired(createCmd.Flags(), ownerFlag)
_ = cobra.MarkFlagRequired(createCmd.Flags(), outFlag)
@@ -68,10 +70,14 @@ func createToken(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err)
if iatRelative || expRelative || nvbRelative {
endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
if len(endpoint) == 0 {
commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC))
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint)
commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", err)
@@ -101,6 +107,9 @@ func createToken(cmd *cobra.Command, _ []string) {
b.SetIat(iat)
b.ForUser(ownerID)
impersonate, _ := cmd.Flags().GetBool(impersonateFlag)
b.SetImpersonate(impersonate)
eaclPath, _ := cmd.Flags().GetString(eaclFlag)
if eaclPath != "" {
table := eaclSDK.NewTable()

View file

@@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -99,12 +100,12 @@ It will be stored in sidechain when inner ring will accepts it.`,
_, err = internalclient.SyncContainerSettings(cmd.Context(), syncContainerPrm)
commonCmd.ExitOnErr(cmd, "syncing container's settings rpc error: %w", err)
var putPrm internalclient.PutContainerPrm
putPrm.SetClient(cli)
putPrm.SetContainer(cnr)
if tok != nil {
putPrm.WithinSession(*tok)
putPrm := internalclient.PutContainerPrm{
Client: cli,
ClientParams: client.PrmContainerPut{
Container: &cnr,
Session: tok,
},
}
res, err := internalclient.PutContainer(cmd.Context(), putPrm)
@@ -117,9 +118,12 @@ It will be stored in sidechain when inner ring will accepts it.`,
if containerAwait {
cmd.Println("awaiting...")
var getPrm internalclient.GetContainerPrm
getPrm.SetClient(cli)
getPrm.SetContainer(id)
getPrm := internalclient.GetContainerPrm{
Client: cli,
ClientParams: client.PrmContainerGet{
ContainerID: &id,
},
}
for i := 0; i < awaitTimeout; i++ {
time.Sleep(1 * time.Second)

View file

@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
@@ -30,9 +31,12 @@ Only owner of the container has a permission to remove container.`,
if force, _ := cmd.Flags().GetBool(commonflags.ForceFlag); !force {
common.PrintVerbose(cmd, "Reading the container to check ownership...")
var getPrm internalclient.GetContainerPrm
getPrm.SetClient(cli)
getPrm.SetContainer(id)
getPrm := internalclient.GetContainerPrm{
Client: cli,
ClientParams: client.PrmContainerGet{
ContainerID: &id,
},
}
resGet, err := internalclient.GetContainer(cmd.Context(), getPrm)
commonCmd.ExitOnErr(cmd, "can't get the container: %w", err)
@@ -83,12 +87,12 @@ Only owner of the container has a permission to remove container.`,
}
}
var delPrm internalclient.DeleteContainerPrm
delPrm.SetClient(cli)
delPrm.SetContainer(id)
if tok != nil {
delPrm.WithinSession(*tok)
delPrm := internalclient.DeleteContainerPrm{
Client: cli,
ClientParams: client.PrmContainerDelete{
ContainerID: &id,
Session: tok,
},
}
_, err := internalclient.DeleteContainer(cmd.Context(), delPrm)
@@ -99,9 +103,12 @@ Only owner of the container has a permission to remove container.`,
if containerAwait {
cmd.Println("awaiting...")
var getPrm internalclient.GetContainerPrm
getPrm.SetClient(cli)
getPrm.SetContainer(id)
getPrm := internalclient.GetContainerPrm{
Client: cli,
ClientParams: client.PrmContainerGet{
ContainerID: &id,
},
}
for i := 0; i < awaitTimeout; i++ {
time.Sleep(1 * time.Second)

View file

@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -147,9 +148,12 @@ func getContainer(cmd *cobra.Command) (container.Container, *ecdsa.PrivateKey) {
pk = key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
var prm internalclient.GetContainerPrm
prm.SetClient(cli)
prm.SetContainer(id)
prm := internalclient.GetContainerPrm{
Client: cli,
ClientParams: client.PrmContainerGet{
ContainerID: &id,
},
}
res, err := internalclient.GetContainer(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

View file

@@ -52,8 +52,9 @@ var listContainersCmd = &cobra.Command{
res, err := internalclient.ListContainers(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
var prmGet internalclient.GetContainerPrm
prmGet.SetClient(cli)
prmGet := internalclient.GetContainerPrm{
Client: cli,
}
containerIDs := res.IDList()
for _, cnrID := range containerIDs {
@@ -62,7 +63,8 @@ var listContainersCmd = &cobra.Command{
continue
}
prmGet.SetContainer(cnrID)
cnrID := cnrID
prmGet.ClientParams.ContainerID = &cnrID
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
cmd.Printf(" failed to read attributes: %v\n", err)

View file

@ -9,7 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
@ -31,7 +31,7 @@ var listContainerObjectsCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
id := parseContainerID(cmd)
filters := new(object.SearchFilters)
filters := new(objectSDK.SearchFilters)
filters.AddRootFilter() // search only user created objects
cli := internalclient.GetSDKClientByFlag(cmd, key.GetOrGenerate(cmd), commonflags.RPC)

View file

@ -3,6 +3,7 @@ package container
import (
"bufio"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
@ -39,7 +40,7 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error {
for id, node := range repl.nodes {
var attrs []string
node.IterateAttributes(func(k, v string) {
attrs = append(attrs, fmt.Sprintf("%s:%s", k, v))
attrs = append(attrs, fmt.Sprintf("%s:%q", k, v))
})
fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
i++
@ -69,6 +70,40 @@ func (repl *policyPlaygroundREPL) handleAdd(args []string) error {
return nil
}
func (repl *policyPlaygroundREPL) handleLoad(args []string) error {
if len(args) != 1 {
return fmt.Errorf("too few arguments for command 'add': got %d, want 1", len(args))
}
jsonNetmap := map[string]map[string]string{}
b, err := os.ReadFile(args[0])
if err != nil {
return fmt.Errorf("reading netmap file %q: %v", args[0], err)
}
if err := json.Unmarshal(b, &jsonNetmap); err != nil {
return fmt.Errorf("decoding json netmap: %v", err)
}
repl.nodes = make(map[string]netmap.NodeInfo)
for id, attrs := range jsonNetmap {
key, err := hex.DecodeString(id)
if err != nil {
return fmt.Errorf("node id must be a hex string: got %q: %v", id, err)
}
node := repl.nodes[id]
node.SetPublicKey(key)
for k, v := range attrs {
node.SetAttribute(k, v)
}
repl.nodes[id] = node
}
return nil
}
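Judging by the unmarshalling above, the file passed to `load` is a JSON object keyed by hex-encoded node public keys, each mapping attribute names to string values. A hypothetical example (the key is a placeholder, not a real public key):

```
{
  "0102030405": {
    "Country": "RU",
    "Price": "10"
  }
}
```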
func (repl *policyPlaygroundREPL) handleRemove(args []string) error {
if len(args) == 0 {
return fmt.Errorf("too few arguments for command 'remove': got %d, want >0", len(args))
@ -82,15 +117,32 @@ func (repl *policyPlaygroundREPL) handleRemove(args []string) error {
}
func (repl *policyPlaygroundREPL) handleEval(args []string) error {
policyStr := strings.Join(args, " ")
placementPolicy, err := parseContainerPolicy(repl.cmd, policyStr)
if err != nil {
return fmt.Errorf("parsing placement policy: %v", err)
}
policyStr := strings.TrimSpace(strings.Join(args, " "))
var nodes [][]netmap.NodeInfo
nm := repl.netMap()
nodes, err := nm.ContainerNodes(*placementPolicy, nil)
if err != nil {
return fmt.Errorf("building container nodes: %v", err)
if strings.HasPrefix(policyStr, "CBF") || strings.HasPrefix(policyStr, "SELECT") || strings.HasPrefix(policyStr, "FILTER") {
// Assume that the input is a partial SELECT-FILTER expression.
// Full inline policies always start with the UNIQUE or REP keyword,
// while a path to an external policy file starts differently.
sfExpr, err := netmap.DecodeSelectFilterString(policyStr)
if err != nil {
return fmt.Errorf("parsing select-filter expression: %v", err)
}
nodes, err = nm.SelectFilterNodes(sfExpr)
if err != nil {
return fmt.Errorf("building select-filter nodes: %v", err)
}
} else {
// Assume that the input is a full policy or input file otherwise.
placementPolicy, err := parseContainerPolicy(repl.cmd, policyStr)
if err != nil {
return fmt.Errorf("parsing placement policy: %v", err)
}
nodes, err = nm.ContainerNodes(*placementPolicy, nil)
if err != nil {
return fmt.Errorf("building container nodes: %v", err)
}
}
for i, ns := range nodes {
var ids []string
@ -131,9 +183,12 @@ func (repl *policyPlaygroundREPL) run() error {
}
cmdHandlers := map[string]func([]string) error{
"list": repl.handleLs,
"ls": repl.handleLs,
"add": repl.handleAdd,
"load": repl.handleLoad,
"remove": repl.handleRemove,
"rm": repl.handleRemove,
"eval": repl.handleEval,
}
for reader := bufio.NewReader(os.Stdin); ; {
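With this branching, `eval` accepts either a full placement policy or a bare select-filter expression, distinguished by the leading keyword. A hypothetical session (output elided):

```
> load netmap.json
> eval REP 2 IN X CBF 1 SELECT 2 FROM * AS X
> eval SELECT 1 FROM F FILTER Country EQ 'RU' AS F
```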

View file

@ -13,6 +13,7 @@ var shardsCmd = &cobra.Command{
func initControlShardsCmd() {
shardsCmd.AddCommand(listShardsCmd)
shardsCmd.AddCommand(setShardModeCmd)
shardsCmd.AddCommand(evacuateShardCmd)
shardsCmd.AddCommand(evacuationShardCmd)
shardsCmd.AddCommand(flushCacheCmd)
shardsCmd.AddCommand(doctorCmd)

View file

@ -38,8 +38,6 @@ var netInfoCmd = &cobra.Command{
const format = " %s: %v\n"
cmd.Println("FrostFS network configuration (system)")
cmd.Printf(format, "Audit fee", netInfo.AuditFee())
cmd.Printf(format, "Storage price", netInfo.StoragePrice())
cmd.Printf(format, "Container fee", netInfo.ContainerFee())
cmd.Printf(format, "Epoch duration", netInfo.EpochDuration())
cmd.Printf(format, "Inner Ring candidate fee", netInfo.IRCandidateFee())

View file

@ -11,7 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/cheggaaa/pb"
"github.com/spf13/cobra"
@ -84,7 +84,7 @@ func getObject(cmd *cobra.Command, _ []string) {
p = pb.New64(0)
p.Output = cmd.OutOrStdout()
prm.SetPayloadWriter(p.NewProxyWriter(payloadWriter))
prm.SetHeaderCallback(func(o *object.Object) {
prm.SetHeaderCallback(func(o *objectSDK.Object) {
p.SetTotal64(int64(o.PayloadSize()))
p.Start()
})

View file

@ -13,7 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
@ -77,7 +77,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "", err)
}
func saveAndPrintHeader(cmd *cobra.Command, obj *object.Object, filename string) error {
func saveAndPrintHeader(cmd *cobra.Command, obj *objectSDK.Object, filename string) error {
bs, err := marshalHeader(cmd, obj)
if err != nil {
return fmt.Errorf("could not marshal header: %w", err)
@ -97,7 +97,7 @@ func saveAndPrintHeader(cmd *cobra.Command, obj *object.Object, filename string)
return printHeader(cmd, obj)
}
func marshalHeader(cmd *cobra.Command, hdr *object.Object) ([]byte, error) {
func marshalHeader(cmd *cobra.Command, hdr *objectSDK.Object) ([]byte, error) {
toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
toProto, _ := cmd.Flags().GetBool("proto")
switch {
@ -138,7 +138,7 @@ func printContainerID(cmd *cobra.Command, recv func() (cid.ID, bool)) {
cmd.Printf("CID: %s\n", strID)
}
func printHeader(cmd *cobra.Command, obj *object.Object) error {
func printHeader(cmd *cobra.Command, obj *objectSDK.Object) error {
printObjectID(cmd, obj.ID)
printContainerID(cmd, obj.ContainerID)
cmd.Printf("Owner: %s\n", obj.OwnerID())
@ -150,7 +150,7 @@ func printHeader(cmd *cobra.Command, obj *object.Object) error {
cmd.Println("Attributes:")
for _, attr := range obj.Attributes() {
if attr.Key() == object.AttributeTimestamp {
if attr.Key() == objectSDK.AttributeTimestamp {
cmd.Printf(" %s=%s (%s)\n",
attr.Key(),
attr.Value(),
@ -174,7 +174,7 @@ func printHeader(cmd *cobra.Command, obj *object.Object) error {
return printSplitHeader(cmd, obj)
}
func printSplitHeader(cmd *cobra.Command, obj *object.Object) error {
func printSplitHeader(cmd *cobra.Command, obj *objectSDK.Object) error {
if splitID := obj.SplitID(); splitID != nil {
cmd.Printf("Split ID: %s\n", splitID)
}

View file

@ -3,6 +3,7 @@ package object
import (
"context"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"strconv"
@ -154,9 +155,12 @@ func getPlacementPolicyAndNetmap(cmd *cobra.Command, cnrID cid.ID, cli *client.C
}
func getPlacementPolicy(ctx context.Context, cnrID cid.ID, cli *client.Client) (netmapSDK.PlacementPolicy, error) {
var prm internalclient.GetContainerPrm
prm.SetClient(cli)
prm.SetContainer(cnrID)
prm := internalclient.GetContainerPrm{
Client: cli,
ClientParams: client.PrmContainerGet{
ContainerID: &cnrID,
},
}
res, err := internalclient.GetContainer(ctx, prm)
if err != nil {
@ -223,14 +227,12 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPl
}
}
var err error
eg, egCtx := errgroup.WithContext(cmd.Context())
for _, cand := range candidates {
cand := cand
eg.Go(func() error {
var cli *client.Client
cli, err = createClient(egCtx, cmd, cand, pk)
cli, err := createClient(egCtx, cmd, cand, pk)
if err != nil {
resultMtx.Lock()
defer resultMtx.Unlock()
@ -268,13 +270,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPl
})
}
egErr := eg.Wait()
if err != nil || egErr != nil {
if err == nil {
err = egErr
}
commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", err)
}
commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait())
return result
}
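The rewrite above removes an `err` variable that was written by several goroutines at once (a data race) and relies instead on `errgroup`: each goroutine returns its own error and `eg.Wait()` surfaces the first non-nil one. The pattern in isolation:

```
package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var eg errgroup.Group
	for i := 0; i < 3; i++ {
		i := i // capture the loop variable (pre-Go 1.22)
		eg.Go(func() error {
			if i == 1 {
				return errors.New("task 1 failed")
			}
			return nil
		})
	}
	fmt.Println(eg.Wait()) // prints the first error: task 1 failed
}
```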
@ -285,6 +281,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
addresses = append(addresses, s)
return false
})
addresses = append(addresses, candidate.ExternalAddresses()...)
var lastErr error
for _, address := range addresses {
var networkAddr network.Address
@ -336,13 +333,9 @@ func printPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPlacem
defer func() {
commonCmd.ExitOnErr(cmd, "failed to print placement info: %w", w.Flush())
}()
fmt.Fprintln(w, "Netmap node\tShould contain object\tActually contains object\t")
fmt.Fprintln(w, "Node ID\tShould contain object\tActually contains object\t")
for _, n := range netmap.Nodes() {
var address string
n.IterateNetworkEndpoints(func(s string) bool {
address = s
return s != ""
})
nodeID := hex.EncodeToString(n.PublicKey())
_, required := requiredPlacement[n.Hash()]
actual, actualExists := actualPlacement[n.Hash()]
actualStr := ""
@ -353,6 +346,6 @@ func printPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPlacem
actualStr = strconv.FormatBool(actual.value)
}
}
fmt.Fprintf(w, "%s\t%s\t%s\t\n", address, strconv.FormatBool(required), actualStr)
fmt.Fprintf(w, "%s\t%s\t%s\t\n", nodeID, strconv.FormatBool(required), actualStr)
}
}

View file

@ -16,7 +16,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/cheggaaa/pb"
"github.com/spf13/cobra"
@ -81,7 +81,7 @@ func putObject(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("can't open file '%s': %w", filename, err))
}
var payloadReader io.Reader = f
obj := object.New()
obj := objectSDK.New()
if binary {
payloadReader, cnr, ownerID = readFilePayload(filename, cmd)
@ -155,7 +155,7 @@ func parseCopyNumber(cmd *cobra.Command, copyNum string) []uint32 {
func readFilePayload(filename string, cmd *cobra.Command) (io.Reader, cid.ID, user.ID) {
buf, err := os.ReadFile(filename)
commonCmd.ExitOnErr(cmd, "unable to read given file: %w", err)
objTemp := object.New()
objTemp := objectSDK.New()
// TODO(@acid-ant): #1932 Use streams to marshal/unmarshal payload
commonCmd.ExitOnErr(cmd, "can't unmarshal object from given file: %w", objTemp.Unmarshal(buf))
payloadReader := bytes.NewReader(objTemp.Payload())
@ -174,19 +174,19 @@ func setFilePayloadReader(cmd *cobra.Command, f *os.File, prm *internalclient.Pu
p := pb.New64(fi.Size())
p.Output = cmd.OutOrStdout()
prm.SetPayloadReader(p.NewProxyReader(f))
prm.SetHeaderCallback(func(o *object.Object) { p.Start() })
prm.SetHeaderCallback(func(o *objectSDK.Object) { p.Start() })
return p
}
func setBinaryPayloadReader(cmd *cobra.Command, obj *object.Object, prm *internalclient.PutObjectPrm, payloadReader io.Reader) *pb.ProgressBar {
func setBinaryPayloadReader(cmd *cobra.Command, obj *objectSDK.Object, prm *internalclient.PutObjectPrm, payloadReader io.Reader) *pb.ProgressBar {
p := pb.New(len(obj.Payload()))
p.Output = cmd.OutOrStdout()
prm.SetPayloadReader(p.NewProxyReader(payloadReader))
prm.SetHeaderCallback(func(o *object.Object) { p.Start() })
prm.SetHeaderCallback(func(o *objectSDK.Object) { p.Start() })
return p
}
func getAllObjectAttributes(cmd *cobra.Command) []object.Attribute {
func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
attrs, err := parseObjectAttrs(cmd)
commonCmd.ExitOnErr(cmd, "can't parse object attributes: %w", err)
@ -205,7 +205,7 @@ func getAllObjectAttributes(cmd *cobra.Command) []object.Attribute {
if !expAttrFound {
index := len(attrs)
attrs = append(attrs, object.Attribute{})
attrs = append(attrs, objectSDK.Attribute{})
attrs[index].SetKey(objectV2.SysAttributeExpEpoch)
attrs[index].SetValue(expAttrValue)
}
@ -213,7 +213,7 @@ func getAllObjectAttributes(cmd *cobra.Command) []object.Attribute {
return attrs
}
func parseObjectAttrs(cmd *cobra.Command) ([]object.Attribute, error) {
func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
var rawAttrs []string
raw := cmd.Flag("attributes").Value.String()
@ -221,7 +221,7 @@ func parseObjectAttrs(cmd *cobra.Command) ([]object.Attribute, error) {
rawAttrs = strings.Split(raw, ",")
}
attrs := make([]object.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
for i := range rawAttrs {
k, v, found := strings.Cut(rawAttrs[i], "=")
if !found {
@ -235,23 +235,23 @@ func parseObjectAttrs(cmd *cobra.Command) ([]object.Attribute, error) {
if !disableFilename {
filename := filepath.Base(cmd.Flag(fileFlag).Value.String())
index := len(attrs)
attrs = append(attrs, object.Attribute{})
attrs[index].SetKey(object.AttributeFileName)
attrs = append(attrs, objectSDK.Attribute{})
attrs[index].SetKey(objectSDK.AttributeFileName)
attrs[index].SetValue(filename)
}
disableTime, _ := cmd.Flags().GetBool("disable-timestamp")
if !disableTime {
index := len(attrs)
attrs = append(attrs, object.Attribute{})
attrs[index].SetKey(object.AttributeTimestamp)
attrs = append(attrs, objectSDK.Attribute{})
attrs[index].SetKey(objectSDK.AttributeTimestamp)
attrs[index].SetValue(strconv.FormatInt(time.Now().Unix(), 10))
}
return attrs, nil
}
func parseObjectNotifications(cmd *cobra.Command) (*object.NotificationInfo, error) {
func parseObjectNotifications(cmd *cobra.Command) (*objectSDK.NotificationInfo, error) {
const (
separator = ":"
useDefaultTopic = "-"
@ -267,7 +267,7 @@ func parseObjectNotifications(cmd *cobra.Command) (*object.NotificationInfo, err
return nil, fmt.Errorf("notification must be in the form of: *epoch*%s*topic*, got %s", separator, raw)
}
ni := new(object.NotificationInfo)
ni := new(objectSDK.NotificationInfo)
epoch, err := strconv.ParseUint(before, 10, 64)
if err != nil {

View file

@ -14,7 +14,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
@ -102,7 +102,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
}
func printSplitInfoErr(cmd *cobra.Command, err error) bool {
var errSplitInfo *object.SplitInfoError
var errSplitInfo *objectSDK.SplitInfoError
ok := errors.As(err, &errSplitInfo)
@ -114,14 +114,14 @@ func printSplitInfoErr(cmd *cobra.Command, err error) bool {
return ok
}
func printSplitInfo(cmd *cobra.Command, info *object.SplitInfo) {
func printSplitInfo(cmd *cobra.Command, info *objectSDK.SplitInfo) {
bs, err := marshalSplitInfo(cmd, info)
commonCmd.ExitOnErr(cmd, "can't marshal split info: %w", err)
cmd.Println(string(bs))
}
func marshalSplitInfo(cmd *cobra.Command, info *object.SplitInfo) ([]byte, error) {
func marshalSplitInfo(cmd *cobra.Command, info *objectSDK.SplitInfo) ([]byte, error) {
toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
toProto, _ := cmd.Flags().GetBool("proto")
switch {
@ -146,13 +146,13 @@ func marshalSplitInfo(cmd *cobra.Command, info *object.SplitInfo) ([]byte, error
}
}
func getRangeList(cmd *cobra.Command) ([]*object.Range, error) {
func getRangeList(cmd *cobra.Command) ([]*objectSDK.Range, error) {
v := cmd.Flag("range").Value.String()
if len(v) == 0 {
return nil, nil
}
vs := strings.Split(v, ",")
rs := make([]*object.Range, len(vs))
rs := make([]*objectSDK.Range, len(vs))
for i := range vs {
before, after, found := strings.Cut(vs[i], rangeSep)
if !found {
@ -176,7 +176,7 @@ func getRangeList(cmd *cobra.Command) ([]*object.Range, error) {
return nil, fmt.Errorf("invalid '%s' range: uint64 overflow", vs[i])
}
rs[i] = object.NewRange()
rs[i] = objectSDK.NewRange()
rs[i].SetOffset(offset)
rs[i].SetLength(length)
}

View file

@ -10,7 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
@ -72,18 +72,18 @@ func searchObject(cmd *cobra.Command, _ []string) {
}
}
var searchUnaryOpVocabulary = map[string]object.SearchMatchType{
"NOPRESENT": object.MatchNotPresent,
var searchUnaryOpVocabulary = map[string]objectSDK.SearchMatchType{
"NOPRESENT": objectSDK.MatchNotPresent,
}
var searchBinaryOpVocabulary = map[string]object.SearchMatchType{
"EQ": object.MatchStringEqual,
"NE": object.MatchStringNotEqual,
"COMMON_PREFIX": object.MatchCommonPrefix,
var searchBinaryOpVocabulary = map[string]objectSDK.SearchMatchType{
"EQ": objectSDK.MatchStringEqual,
"NE": objectSDK.MatchStringNotEqual,
"COMMON_PREFIX": objectSDK.MatchCommonPrefix,
}
func parseSearchFilters(cmd *cobra.Command) (object.SearchFilters, error) {
var fs object.SearchFilters
func parseSearchFilters(cmd *cobra.Command) (objectSDK.SearchFilters, error) {
var fs objectSDK.SearchFilters
for i := range searchFilters {
words := strings.Fields(searchFilters[i])
@ -97,7 +97,7 @@ func parseSearchFilters(cmd *cobra.Command) (object.SearchFilters, error) {
return nil, fmt.Errorf("could not read attributes filter from file: %w", err)
}
subFs := object.NewSearchFilters()
subFs := objectSDK.NewSearchFilters()
if err := subFs.UnmarshalJSON(data); err != nil {
return nil, fmt.Errorf("could not unmarshal attributes filter from file: %w", err)
@ -138,7 +138,7 @@ func parseSearchFilters(cmd *cobra.Command) (object.SearchFilters, error) {
return nil, fmt.Errorf("could not parse object ID: %w", err)
}
fs.AddObjectIDFilter(object.MatchStringEqual, id)
fs.AddObjectIDFilter(objectSDK.MatchStringEqual, id)
}
return fs, nil

View file

@ -16,7 +16,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"github.com/spf13/cobra"
@ -87,7 +87,7 @@ func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address
func readObjectAddressBin(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID, filename string) oid.Address {
buf, err := os.ReadFile(filename)
commonCmd.ExitOnErr(cmd, "unable to read given file: %w", err)
objTemp := object.New()
objTemp := objectSDK.New()
commonCmd.ExitOnErr(cmd, "can't unmarshal object from given file: %w", objTemp.Unmarshal(buf))
var addr oid.Address
@ -356,7 +356,7 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID,
_, err := internal.HeadObject(cmd.Context(), prmHead)
var errSplit *object.SplitInfoError
var errSplit *objectSDK.SplitInfoError
switch {
default:
@ -381,7 +381,7 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID,
return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnr, obj)
}
func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *object.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) {
func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) {
// collect split chain by the descending ease of operations (ease is evaluated heuristically).
// If any approach fails, we don't try the next since we assume that it will fail too.
@ -413,12 +413,12 @@ func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *object.Spl
return nil, false
}
func tryGetSplitMembersBySplitID(cmd *cobra.Command, splitInfo *object.SplitInfo, cli *client.Client, cnr cid.ID) ([]oid.ID, bool) {
func tryGetSplitMembersBySplitID(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, cli *client.Client, cnr cid.ID) ([]oid.ID, bool) {
if idSplit := splitInfo.SplitID(); idSplit != nil {
common.PrintVerbose(cmd, "Collecting split members by split ID...")
var query object.SearchFilters
query.AddSplitIDFilter(object.MatchStringEqual, idSplit)
var query objectSDK.SearchFilters
query.AddSplitIDFilter(objectSDK.MatchStringEqual, idSplit)
var prm internal.SearchObjectsPrm
prm.SetContainerID(cnr)
@ -437,7 +437,7 @@ func tryGetSplitMembersBySplitID(cmd *cobra.Command, splitInfo *object.SplitInfo
return nil, false
}
func tryRestoreChainInReverse(cmd *cobra.Command, splitInfo *object.SplitInfo, prmHead internal.HeadObjectPrm, cli *client.Client, cnr cid.ID, obj oid.ID) []oid.ID {
func tryRestoreChainInReverse(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cli *client.Client, cnr cid.ID, obj oid.ID) []oid.ID {
var addrObj oid.Address
addrObj.SetContainer(cnr)
@ -482,8 +482,8 @@ func tryRestoreChainInReverse(cmd *cobra.Command, splitInfo *object.SplitInfo, p
common.PrintVerbose(cmd, "Looking for a linking object...")
var query object.SearchFilters
query.AddParentIDFilter(object.MatchStringEqual, obj)
var query objectSDK.SearchFilters
query.AddParentIDFilter(objectSDK.MatchStringEqual, obj)
var prmSearch internal.SearchObjectsPrm
prmSearch.SetClient(cli)

View file

@ -85,7 +85,7 @@ func init() {
rootCmd.AddCommand(objectCli.Cmd)
rootCmd.AddCommand(containerCli.Cmd)
rootCmd.AddCommand(tree.Cmd)
rootCmd.AddCommand(gendoc.Command(rootCmd))
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))
}
func entryPoint(cmd *cobra.Command, _ []string) {

View file

@ -50,24 +50,29 @@ func add(cmd *cobra.Command, _ []string) {
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "client: %w", err)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
var bt []byte
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
bt = t.Marshal()
}
req := new(tree.AddRequest)
req.Body = &tree.AddRequest_Body{
ContainerId: rawCID,
TreeId: tid,
ParentId: pid,
Meta: meta,
BearerToken: common.ReadBearerToken(cmd, bearerFlagKey).Marshal(),
BearerToken: bt,
}
commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
resp, err := cli.Add(ctx, req)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
commonCmd.ExitOnErr(cmd, "failed to cal add: %w", err)
cmd.Println("Node ID: ", resp.Body.NodeId)
}
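The guard added above exists because `common.ReadBearerToken` returns nil when the bearer flag is not set, so the old unconditional `.Marshal()` call dereferenced a nil pointer. The idiom, extracted (a nil slice is a valid "no token" value for the request body):

```
var bt []byte
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
	bt = t.Marshal()
}
req.Body.BearerToken = bt
```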

View file

@ -10,7 +10,7 @@ import (
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/spf13/cobra"
)
@ -53,7 +53,7 @@ func addByPath(cmd *cobra.Command, _ []string) {
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "client: %w", err)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
@ -62,23 +62,27 @@ func addByPath(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err)
path, _ := cmd.Flags().GetString(pathFlagKey)
// pAttr, _ := cmd.Flags().GetString(pathAttributeFlagKey)
var bt []byte
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
bt = t.Marshal()
}
req := new(tree.AddByPathRequest)
req.Body = &tree.AddByPathRequest_Body{
ContainerId: rawCID,
TreeId: tid,
PathAttribute: object.AttributeFileName,
PathAttribute: objectSDK.AttributeFileName,
// PathAttribute: pAttr,
Path: strings.Split(path, "/"),
Meta: meta,
BearerToken: common.ReadBearerToken(cmd, bearerFlagKey).Marshal(),
BearerToken: bt,
}
commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
resp, err := cli.AddByPath(ctx, req)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
commonCmd.ExitOnErr(cmd, "failed to addByPath %w", err)
cmd.Printf("Parent ID: %d\n", resp.GetBody().GetParentId())

View file

@ -10,7 +10,7 @@ import (
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/spf13/cobra"
)
@ -53,33 +53,35 @@ func getByPath(cmd *cobra.Command, _ []string) {
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "client: %w", err)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
latestOnly, _ := cmd.Flags().GetBool(latestOnlyFlagKey)
path, _ := cmd.Flags().GetString(pathFlagKey)
// pAttr, _ := cmd.Flags().GetString(pathAttributeFlagKey)
var bt []byte
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
bt = t.Marshal()
}
req := new(tree.GetNodeByPathRequest)
req.Body = &tree.GetNodeByPathRequest_Body{
ContainerId: rawCID,
TreeId: tid,
PathAttribute: object.AttributeFileName,
PathAttribute: objectSDK.AttributeFileName,
// PathAttribute: pAttr,
Path: strings.Split(path, "/"),
LatestOnly: latestOnly,
AllAttributes: true,
}
if btok := common.ReadBearerToken(cmd, bearerFlagKey); btok != nil {
req.Body.BearerToken = btok.Marshal()
BearerToken: bt,
}
commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
resp, err := cli.GetNodeByPath(ctx, req)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
commonCmd.ExitOnErr(cmd, "failed to call getNodeByPath: %w", err)
nn := resp.GetBody().GetNodes()
if len(nn) == 0 {

View file

@ -0,0 +1,92 @@
package tree
import (
"crypto/sha256"
"errors"
"io"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
)
var getOpLogCmd = &cobra.Command{
Use: "get-op-log",
Short: "Get logged operations starting with some height",
Run: getOpLog,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
func initGetOpLogCmd() {
commonflags.Init(getOpLogCmd)
initCTID(getOpLogCmd)
ff := getOpLogCmd.Flags()
ff.Uint64(heightFlagKey, 0, "Height to start with")
ff.Uint64(countFlagKey, 10, "Logged operations count")
_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getOpLog(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cidRaw, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var cnr cid.ID
err := cnr.DecodeString(cidRaw)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
height, _ := cmd.Flags().GetUint64(heightFlagKey)
count, _ := cmd.Flags().GetUint64(countFlagKey)
req := &tree.GetOpLogRequest{
Body: &tree.GetOpLogRequest_Body{
ContainerId: rawCID,
TreeId: tid,
Height: height,
Count: count,
},
}
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
resp, err := cli.GetOpLog(ctx, req)
commonCmd.ExitOnErr(cmd, "get op log: %w", err)
opLogResp, err := resp.Recv()
for ; err == nil; opLogResp, err = resp.Recv() {
o := opLogResp.GetBody().GetOperation()
cmd.Println("Parent ID: ", o.GetParentId())
cmd.Println("\tChild ID: ", o.GetChildId())
m := &pilorama.Meta{}
err = m.FromBytes(o.GetMeta())
commonCmd.ExitOnErr(cmd, "could not unmarshal meta: %w", err)
cmd.Printf("\tMeta:\n")
cmd.Printf("\t\tTime: %d\n", m.Time)
for _, item := range m.Items {
cmd.Printf("\t\t%s: %s\n", item.Key, item.Value)
}
}
if !errors.Is(err, io.EOF) {
commonCmd.ExitOnErr(cmd, "get op log response stream: %w", err)
}
}
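`getOpLog` drains the server-side stream with a `for ; err == nil; ... = resp.Recv()` loop header and then treats `io.EOF` as normal termination. The same idiom written out, with `Msg`, `stream` and `process` as placeholders:

```
import (
	"errors"
	"io"
)

// Msg stands in for the concrete response type, e.g. *tree.GetOpLogResponse.
type Msg = any

func drain(stream interface{ Recv() (Msg, error) }, process func(Msg)) error {
	for {
		msg, err := stream.Recv()
		if err != nil {
			if errors.Is(err, io.EOF) {
				return nil // server closed the stream normally
			}
			return err // transport or application failure
		}
		process(msg)
	}
}
```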

View file

@ -0,0 +1,43 @@
package tree
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
"github.com/spf13/cobra"
)
var healthcheckCmd = &cobra.Command{
Use: "healthcheck",
Short: "Check tree service availability",
Run: healthcheck,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
func initHealthcheckCmd() {
commonflags.Init(healthcheckCmd)
ff := healthcheckCmd.Flags()
_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func healthcheck(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
req := &tree.HealthcheckRequest{
Body: &tree.HealthcheckRequest_Body{},
}
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
_, err = cli.Healthcheck(ctx, req)
commonCmd.ExitOnErr(cmd, "failed to call healthcheck: %w", err)
common.PrintVerbose(cmd, "Successful healthcheck invocation.")
}

View file

@ -41,7 +41,7 @@ func list(cmd *cobra.Command, _ []string) {
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "client: %w", err)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
@ -52,10 +52,10 @@ func list(cmd *cobra.Command, _ []string) {
},
}
commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
resp, err := cli.TreeList(ctx, req)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
commonCmd.ExitOnErr(cmd, "failed to call treeList %w", err)
for _, treeID := range resp.GetBody().GetIds() {
cmd.Println(treeID)

View file

@ -0,0 +1,107 @@
package tree
import (
"crypto/sha256"
"errors"
"io"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
)
var moveCmd = &cobra.Command{
Use: "move",
Short: "Move node",
Run: move,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
func initMoveCmd() {
commonflags.Init(moveCmd)
initCTID(moveCmd)
ff := moveCmd.Flags()
ff.Uint64(nodeIDFlagKey, 0, "Node ID.")
ff.Uint64(parentIDFlagKey, 0, "Parent ID.")
_ = moveCmd.MarkFlagRequired(nodeIDFlagKey)
_ = moveCmd.MarkFlagRequired(parentIDFlagKey)
_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func move(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cidString, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var cnr cid.ID
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
pid, _ := cmd.Flags().GetUint64(parentIDFlagKey)
nid, _ := cmd.Flags().GetUint64(nodeIDFlagKey)
var bt []byte
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
bt = t.Marshal()
}
subTreeReq := &tree.GetSubTreeRequest{
Body: &tree.GetSubTreeRequest_Body{
ContainerId: rawCID,
TreeId: tid,
RootId: nid,
Depth: 1,
BearerToken: bt,
},
}
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(subTreeReq, pk))
resp, err := cli.GetSubTree(ctx, subTreeReq)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
var meta []*tree.KeyValue
subtreeResp, err := resp.Recv()
for ; err == nil; subtreeResp, err = resp.Recv() {
meta = subtreeResp.GetBody().GetMeta()
}
if !errors.Is(err, io.EOF) {
commonCmd.ExitOnErr(cmd, "failed to read getSubTree response stream: %w", err)
}
var metaErr error
if len(meta) == 0 {
metaErr = errors.New("no meta for given node ID")
}
commonCmd.ExitOnErr(cmd, "unexpected rpc call result: %w", metaErr)
req := &tree.MoveRequest{
Body: &tree.MoveRequest_Body{
ContainerId: rawCID,
TreeId: tid,
ParentId: pid,
NodeId: nid,
Meta: meta,
},
}
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
_, err = cli.Move(ctx, req)
commonCmd.ExitOnErr(cmd, "failed to call move: %w", err)
common.PrintVerbose(cmd, "Successful move invocation.")
}

View file

@ -0,0 +1,74 @@
package tree
import (
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
)
var removeCmd = &cobra.Command{
Use: "remove",
Short: "Remove node",
Run: remove,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
func initRemoveCmd() {
commonflags.Init(removeCmd)
initCTID(removeCmd)
ff := removeCmd.Flags()
ff.Uint64(nodeIDFlagKey, 0, "Node ID.")
_ = removeCmd.MarkFlagRequired(nodeIDFlagKey)
_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func remove(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cidString, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var cnr cid.ID
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
nid, _ := cmd.Flags().GetUint64(nodeIDFlagKey)
var bt []byte
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
bt = t.Marshal()
}
req := &tree.RemoveRequest{
Body: &tree.RemoveRequest_Body{
ContainerId: rawCID,
TreeId: tid,
NodeId: nid,
BearerToken: bt,
},
}
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
_, err = cli.Remove(ctx, req)
commonCmd.ExitOnErr(cmd, "failed to call remove: %w", err)
common.PrintVerbose(cmd, "Successful remove invocation.")
}

View file

@ -15,16 +15,28 @@ func init() {
Cmd.AddCommand(getByPathCmd)
Cmd.AddCommand(addByPathCmd)
Cmd.AddCommand(listCmd)
Cmd.AddCommand(healthcheckCmd)
Cmd.AddCommand(moveCmd)
Cmd.AddCommand(removeCmd)
Cmd.AddCommand(getSubtreeCmd)
Cmd.AddCommand(getOpLogCmd)
initAddCmd()
initGetByPathCmd()
initAddByPathCmd()
initListCmd()
initHealthcheckCmd()
initMoveCmd()
initRemoveCmd()
initGetSubtreeCmd()
initGetOpLogCmd()
}
const (
treeIDFlagKey = "tid"
parentIDFlagKey = "pid"
nodeIDFlagKey = "nid"
rootIDFlagKey = "root"
metaFlagKey = "meta"
@ -34,6 +46,10 @@ const (
latestOnlyFlagKey = "latest"
bearerFlagKey = "bearer"
heightFlagKey = "height"
countFlagKey = "count"
depthFlagKey = "depth"
)
func initCTID(cmd *cobra.Command) {
@ -45,5 +61,5 @@ func initCTID(cmd *cobra.Command) {
ff.String(treeIDFlagKey, "", "Tree ID")
_ = cmd.MarkFlagRequired(treeIDFlagKey)
ff.StringP(bearerFlagKey, "", "", "Path to bearer token")
ff.String(bearerFlagKey, "", "Path to bearer token")
}

View file

@ -0,0 +1,101 @@
package tree
import (
"crypto/sha256"
"errors"
"io"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
)
var getSubtreeCmd = &cobra.Command{
Use: "get-subtree",
Short: "Get subtree",
Run: getSubTree,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
func initGetSubtreeCmd() {
commonflags.Init(getSubtreeCmd)
initCTID(getSubtreeCmd)
ff := getSubtreeCmd.Flags()
ff.Uint64(rootIDFlagKey, 0, "Root ID to traverse from.")
ff.Uint32(depthFlagKey, 10, "Traversal depth.")
_ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag)
_ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey)
_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getSubTree(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cidString, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var cnr cid.ID
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
rid, _ := cmd.Flags().GetUint64(rootIDFlagKey)
depth, _ := cmd.Flags().GetUint32(depthFlagKey)
var bt []byte
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
bt = t.Marshal()
}
req := &tree.GetSubTreeRequest{
Body: &tree.GetSubTreeRequest_Body{
ContainerId: rawCID,
TreeId: tid,
RootId: rid,
Depth: depth,
BearerToken: bt,
},
}
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
resp, err := cli.GetSubTree(ctx, req)
commonCmd.ExitOnErr(cmd, "failed to call getSubTree: %w", err)
subtreeResp, err := resp.Recv()
for ; err == nil; subtreeResp, err = resp.Recv() {
b := subtreeResp.GetBody()
cmd.Printf("Node ID: %d\n", b.GetNodeId())
cmd.Println("\tParent ID: ", b.GetParentId())
cmd.Println("\tTimestamp: ", b.GetTimestamp())
if meta := b.GetMeta(); len(meta) > 0 {
cmd.Println("\tMeta pairs: ")
for _, kv := range meta {
cmd.Printf("\t\t%s: %s\n", kv.GetKey(), string(kv.GetValue()))
}
}
}
if !errors.Is(err, io.EOF) {
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
}
}

View file

@ -3,7 +3,7 @@ package blobovnicza
import (
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
@ -38,7 +38,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
data := res.Object()
var o object.Object
var o objectSDK.Object
common.ExitOnErr(cmd, common.Errf("could not unmarshal object: %w",
o.Unmarshal(data)),
)

View file

@ -8,6 +8,7 @@ const (
flagAddress = "address"
flagEnginePath = "path"
flagOutFile = "out"
flagDBType = "dbtype"
)
// AddAddressFlag adds the address flag to the passed cobra command.
@ -33,3 +34,9 @@ func AddOutputFileFlag(cmd *cobra.Command, v *string) {
"File to save object payload")
_ = cmd.MarkFlagFilename(flagOutFile)
}
// AddDBTypeFlag adds the DB type flag to the passed cobra command.
func AddDBTypeFlag(cmd *cobra.Command, v *string) {
cmd.Flags().StringVar(v, flagOutFile, "bbolt",
"Type of DB used by write cache (default: bbolt)")
}

View file

@ -7,7 +7,7 @@ import (
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
@ -49,7 +49,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
prm.SetAddress(addr)
prm.SetRaw(true)
siErr := new(object.SplitInfoError)
siErr := new(objectSDK.SplitInfoError)
res, err := db.Get(cmd.Context(), prm)
if errors.As(err, &siErr) {

View file

@ -4,14 +4,14 @@ import (
"os"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
// PrintObjectHeader prints passed object's header fields via
// the passed cobra command. Does nothing with the payload.
func PrintObjectHeader(cmd *cobra.Command, h object.Object) {
func PrintObjectHeader(cmd *cobra.Command, h objectSDK.Object) {
cmd.Println("Version:", h.Version())
cmd.Println("Type:", h.Type())
printContainerID(cmd, h.ContainerID)

View file

@ -1,9 +1,14 @@
package writecache
import (
"fmt"
"os"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/spf13/cobra"
)
@ -18,16 +23,36 @@ func init() {
common.AddAddressFlag(inspectCMD, &vAddress)
common.AddComponentPathFlag(inspectCMD, &vPath)
common.AddOutputFileFlag(inspectCMD, &vOut)
common.AddDBTypeFlag(inspectCMD, &vDBType)
}
func inspectFunc(cmd *cobra.Command, _ []string) {
db := openWC(cmd)
defer db.Close()
var data []byte
data, err := writecache.Get(db, []byte(vAddress))
common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err))
switch vDBType {
case "bbolt":
db, err := writecachebbolt.OpenDB(vPath, true, os.OpenFile)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
defer db.Close()
var o object.Object
data, err = writecachebbolt.Get(db, []byte(vAddress))
common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err))
case "badger":
log, err := logger.NewLogger(&logger.Prm{})
common.ExitOnErr(cmd, common.Errf("could not create logger: %w", err))
db, err := writecachebadger.OpenDB(vPath, true, log)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
data, err = writecachebadger.Get(db, []byte(vAddress))
common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err))
default:
common.ExitOnErr(cmd, fmt.Errorf("invalid dbtype: %q (possible values: bbolt, badger)", vDBType))
}
var o objectSDK.Object
common.ExitOnErr(cmd, common.Errf("could not unmarshal object: %w", o.Unmarshal(data)))
common.PrintObjectHeader(cmd, o)
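With the new flag the inspector dispatches on the write-cache backend; a hypothetical invocation, with binary and flag names inferred from this file set:

```
frostfs-lens write-cache inspect --path /path/to/writecache --address <object address> --dbtype badger
```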

View file

@ -3,9 +3,12 @@ package writecache
import (
"fmt"
"io"
"os"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
@ -30,9 +33,26 @@ func listFunc(cmd *cobra.Command, _ []string) {
return err
}
db := openWC(cmd)
defer db.Close()
switch vDBType {
case "bbolt":
db, err := writecachebbolt.OpenDB(vPath, true, os.OpenFile)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
defer db.Close()
err := writecache.IterateDB(db, wAddr)
common.ExitOnErr(cmd, common.Errf("write-cache iterator failure: %w", err))
err = writecachebbolt.IterateDB(db, wAddr)
common.ExitOnErr(cmd, common.Errf("write-cache iterator failure: %w", err))
case "badger":
log, err := logger.NewLogger(&logger.Prm{})
common.ExitOnErr(cmd, common.Errf("could not create logger: %w", err))
db, err := writecachebadger.OpenDB(vPath, true, log)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
err = writecachebadger.IterateDB(db, wAddr)
common.ExitOnErr(cmd, common.Errf("write-cache iterator failure: %w", err))
default:
common.ExitOnErr(cmd, fmt.Errorf("invalid dbtype: %q (possible values: bbolt, badger)", vDBType))
}
}

View file

@ -1,18 +1,14 @@
package writecache
import (
"os"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"github.com/spf13/cobra"
"go.etcd.io/bbolt"
)
var (
vAddress string
vPath string
vOut string
vDBType string
)
// Root contains `write-cache` command definition.
@ -24,10 +20,3 @@ var Root = &cobra.Command{
func init() {
Root.AddCommand(listCMD, inspectCMD)
}
func openWC(cmd *cobra.Command) *bbolt.DB {
db, err := writecache.OpenDB(vPath, true, os.OpenFile)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
return db
}

View file

@ -38,7 +38,7 @@ func init() {
blobovnicza.Root,
meta.Root,
writecache.Root,
gendoc.Command(command),
gendoc.Command(command, gendoc.Options{}),
)
}

View file

@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@ -24,59 +25,6 @@ type valueWithTime[V any] struct {
e error
}
type locker struct {
mtx sync.Mutex
waiters int // not protected by mtx; the outer mutex must be used to update it concurrently
}
type keyLocker[K comparable] struct {
lockers map[K]*locker
lockersMtx sync.Mutex
}
func newKeyLocker[K comparable]() *keyLocker[K] {
return &keyLocker[K]{
lockers: make(map[K]*locker),
}
}
func (l *keyLocker[K]) LockKey(key K) {
l.lockersMtx.Lock()
if locker, found := l.lockers[key]; found {
locker.waiters++
l.lockersMtx.Unlock()
locker.mtx.Lock()
return
}
locker := &locker{
waiters: 1,
}
locker.mtx.Lock()
l.lockers[key] = locker
l.lockersMtx.Unlock()
}
func (l *keyLocker[K]) UnlockKey(key K) {
l.lockersMtx.Lock()
defer l.lockersMtx.Unlock()
locker, found := l.lockers[key]
if !found {
return
}
if locker.waiters == 1 {
delete(l.lockers, key)
}
locker.waiters--
locker.mtx.Unlock()
}
// entity that provides TTL cache interface.
type ttlNetCache[K comparable, V any] struct {
ttl time.Duration
@ -87,7 +35,7 @@ type ttlNetCache[K comparable, V any] struct {
netRdr netValueReader[K, V]
keyLocker *keyLocker[K]
keyLocker *utilSync.KeyLocker[K]
}
// complicates netValueReader with TTL caching mechanism.
@ -100,7 +48,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
sz: sz,
cache: cache,
netRdr: netRdr,
keyLocker: newKeyLocker[K](),
keyLocker: utilSync.NewKeyLocker[K](),
}
}
@ -115,8 +63,8 @@ func (c *ttlNetCache[K, V]) get(key K) (V, error) {
return val.v, val.e
}
c.keyLocker.LockKey(key)
defer c.keyLocker.UnlockKey(key)
c.keyLocker.Lock(key)
defer c.keyLocker.Unlock(key)
val, ok = c.cache.Peek(key)
if ok && time.Since(val.t) < c.ttl {
@ -135,8 +83,8 @@ func (c *ttlNetCache[K, V]) get(key K) (V, error) {
}
func (c *ttlNetCache[K, V]) set(k K, v V, e error) {
c.keyLocker.LockKey(k)
defer c.keyLocker.UnlockKey(k)
c.keyLocker.Lock(k)
defer c.keyLocker.Unlock(k)
c.cache.Add(k, &valueWithTime[V]{
v: v,
@ -146,8 +94,8 @@ func (c *ttlNetCache[K, V]) set(k K, v V, e error) {
}
func (c *ttlNetCache[K, V]) remove(key K) {
c.keyLocker.LockKey(key)
defer c.keyLocker.UnlockKey(key)
c.keyLocker.Lock(key)
defer c.keyLocker.Unlock(key)
c.cache.Remove(key)
}
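The hand-rolled `keyLocker` deleted above now lives in `pkg/util/sync` as the generic `KeyLocker` with a shorter `Lock`/`Unlock` API. A sketch of the read-through use, assuming that package (the helper function is illustrative):

```
import utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"

// fetchOnce serializes concurrent cache misses for the same key so that
// only one network read happens per key.
func fetchOnce(locker *utilSync.KeyLocker[string], key string) {
	locker.Lock(key) // other goroutines asking for the same key wait here
	defer locker.Unlock(key)
	// ... read-through: query the network once, fill the cache ...
}
```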
@ -208,7 +156,7 @@ func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContain
}
func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
s.set(cnr, nil, apistatus.ContainerNotFound{})
s.set(cnr, nil, new(apistatus.ContainerNotFound))
}
// Get returns container value from the cache. If value is missing in the cache

View file

@ -40,7 +40,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
shardmode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
containerClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
@ -127,6 +129,7 @@ type shardCfg struct {
writecacheCfg struct {
enabled bool
typ writecacheconfig.Type
path string
maxBatchSize int
maxBatchDelay time.Duration
@ -135,6 +138,7 @@ type shardCfg struct {
flushWorkerCount int
sizeLimit uint64
noSync bool
gcInterval time.Duration
}
piloramaCfg struct {
@ -238,6 +242,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
wc := &newConfig.writecacheCfg
wc.enabled = true
wc.typ = writeCacheCfg.Type()
wc.path = writeCacheCfg.Path()
wc.maxBatchSize = writeCacheCfg.BoltDB().MaxBatchSize()
wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
@ -246,6 +251,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
wc.flushWorkerCount = writeCacheCfg.WorkersNumber()
wc.sizeLimit = writeCacheCfg.SizeLimit()
wc.noSync = writeCacheCfg.NoSync()
wc.gcInterval = writeCacheCfg.GCInterval()
}
}
@ -337,7 +343,8 @@ type internals struct {
apiVersion version.Version
healthStatus *atomic.Int32
// is node under maintenance
isMaintenance atomic.Bool
isMaintenance atomic.Bool
alreadyBootstraped bool
}
// starts node's maintenance.
@ -439,6 +446,8 @@ type cfgGRPC struct {
servers []*grpc.Server
endpoints []string
maxChunkSize uint64
maxAddrAmount uint64
@ -621,7 +630,7 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
key: key,
binPublicKey: key.PublicKey().Bytes(),
localAddr: netAddr,
respSvc: response.NewService(response.WithNetworkState(netState)),
respSvc: response.NewService(netState),
clientCache: cache.NewSDKClientCache(cacheOpts),
bgClientCache: cache.NewSDKClientCache(cacheOpts),
putClientCache: cache.NewSDKClientCache(cacheOpts),
@ -703,20 +712,36 @@ func (c *cfg) shardOpts() []shardOptsWithID {
return shards
}
func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
var writeCacheOpts []writecache.Option
func (c *cfg) getWriteCacheOpts(shCfg shardCfg) writecacheconfig.Options {
var writeCacheOpts writecacheconfig.Options
if wcRead := shCfg.writecacheCfg; wcRead.enabled {
writeCacheOpts = append(writeCacheOpts,
writecache.WithPath(wcRead.path),
writecache.WithMaxBatchSize(wcRead.maxBatchSize),
writecache.WithMaxBatchDelay(wcRead.maxBatchDelay),
writecache.WithMaxObjectSize(wcRead.maxObjSize),
writecache.WithSmallObjectSize(wcRead.smallObjectSize),
writecache.WithFlushWorkersCount(wcRead.flushWorkerCount),
writecache.WithMaxCacheSize(wcRead.sizeLimit),
writecache.WithNoSync(wcRead.noSync),
writecache.WithLogger(c.log),
)
switch wcRead.typ {
case writecacheconfig.TypeBBolt:
writeCacheOpts.Type = writecacheconfig.TypeBBolt
writeCacheOpts.BBoltOptions = append(writeCacheOpts.BBoltOptions,
writecachebbolt.WithPath(wcRead.path),
writecachebbolt.WithMaxBatchSize(wcRead.maxBatchSize),
writecachebbolt.WithMaxBatchDelay(wcRead.maxBatchDelay),
writecachebbolt.WithMaxObjectSize(wcRead.maxObjSize),
writecachebbolt.WithSmallObjectSize(wcRead.smallObjectSize),
writecachebbolt.WithFlushWorkersCount(wcRead.flushWorkerCount),
writecachebbolt.WithMaxCacheSize(wcRead.sizeLimit),
writecachebbolt.WithNoSync(wcRead.noSync),
writecachebbolt.WithLogger(c.log),
)
case writecacheconfig.TypeBadger:
writeCacheOpts.Type = writecacheconfig.TypeBadger
writeCacheOpts.BadgerOptions = append(writeCacheOpts.BadgerOptions,
writecachebadger.WithPath(wcRead.path),
writecachebadger.WithMaxObjectSize(wcRead.maxObjSize),
writecachebadger.WithFlushWorkersCount(wcRead.flushWorkerCount),
writecachebadger.WithMaxCacheSize(wcRead.sizeLimit),
writecachebadger.WithLogger(c.log),
writecachebadger.WithGCInterval(wcRead.gcInterval),
)
default:
panic(fmt.Sprintf("unknown writecache type: %q", wcRead.typ))
}
}
return writeCacheOpts
}
@ -835,7 +860,7 @@ func (c *cfg) getShardOpts(shCfg shardCfg) shardOptsWithID {
shard.WithMetaBaseOptions(mbOptions...),
shard.WithPiloramaOptions(piloramaOpts...),
shard.WithWriteCache(shCfg.writecacheCfg.enabled),
shard.WithWriteCacheOptions(writeCacheOpts...),
shard.WithWriteCacheOptions(writeCacheOpts),
shard.WithRemoverBatchSize(shCfg.gcCfg.removerBatchSize),
shard.WithGCRemoverSleepInterval(shCfg.gcCfg.removerSleepInterval),
shard.WithExpiredCollectorBatchSize(shCfg.gcCfg.expiredCollectorBatchSize),
@ -996,13 +1021,6 @@ func (c *cfg) needBootstrap() bool {
return c.cfgNetmap.needBootstrap
}
// ObjectServiceLoad implements system loader interface for policer component.
// It is calculated as size/capacity ratio of "remote object put" worker.
// Returns float value between 0.0 and 1.0.
func (c *cfg) ObjectServiceLoad() float64 {
return float64(c.cfgObject.pool.putRemote.Running()) / float64(c.cfgObject.pool.putRemoteCapacity)
}
type dCmp struct {
name string
reloadFunc func() error
@ -1061,6 +1079,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
}
components = append(components, dCmp{"logger", logPrm.Reload})
components = append(components, dCmp{"runtime", func() error {
setRuntimeParameters(c)
return nil
}})
components = append(components, dCmp{"tracing", func() error {
updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg))
if updated {

View file

@ -199,7 +199,7 @@ func parseSizeInBytes(sizeStr string) uint64 {
if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' {
lastChar--
}
if lastChar > 0 {
if lastChar >= 0 {
switch unicode.ToLower(rune(sizeStr[lastChar])) {
case 'k':
multiplier = 1 << 10

View file

@ -125,10 +125,15 @@ func TestSizeInBytes(t *testing.T) {
)
configtest.ForEachFileType("test/config", func(c *config.Config) {
c = c.Sub("sizes")
require.EqualValues(t, 1, config.SizeInBytesSafe(c, "size_b"))
require.EqualValues(t, kb, config.SizeInBytesSafe(c, "size_k"))
require.EqualValues(t, kb, config.SizeInBytesSafe(c, "size_kb"))
require.EqualValues(t, 2*kb, config.SizeInBytesSafe(c, "size_kb_no_space"))
require.EqualValues(t, 12*mb, config.SizeInBytesSafe(c, "size_m"))
require.EqualValues(t, 12*mb, config.SizeInBytesSafe(c, "size_mb"))
require.EqualValues(t, 4*gb, config.SizeInBytesSafe(c, "size_g"))
require.EqualValues(t, 4*gb, config.SizeInBytesSafe(c, "size_gb"))
require.EqualValues(t, 5*tb, config.SizeInBytesSafe(c, "size_t"))
require.EqualValues(t, 5*tb, config.SizeInBytesSafe(c, "size_tb"))
require.EqualValues(t, 12, config.SizeInBytesSafe(c, "size_i_am_not_very_clever"))
require.EqualValues(t, tb/2, config.SizeInBytesSafe(c, "size_float"))


@ -1,8 +1,12 @@
package writecacheconfig
import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
boltdbconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/boltdb"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
)
// Config is a wrapper over the config section
@ -21,6 +25,9 @@ const (
// SizeLimitDefault is a default write-cache size limit.
SizeLimitDefault = 1 << 30
// DefaultGCInterval is the default duration of the GC cycle interval.
DefaultGCInterval = 1 * time.Minute
)
// From wraps config section into Config.
@ -35,6 +42,22 @@ func (x *Config) Enabled() bool {
return config.Bool((*config.Config)(x), "enabled")
}
// Type returns the writecache implementation type to use.
//
// Panics if the type is not recognized.
func (x *Config) Type() writecacheconfig.Type {
t := config.String((*config.Config)(x), "type")
switch t {
case "bbolt", "":
return writecacheconfig.TypeBBolt
case "badger":
return writecacheconfig.TypeBadger
}
panic(fmt.Sprintf("invalid writecache type: %q", t))
}
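For illustration, a hypothetical shard fragment exercising this selector; per the switch above, an absent or empty `type` falls back to bbolt, and anything other than `bbolt`/`badger` panics at startup:

```yaml
# Illustrative fragment, not taken from the repo configs.
writecache:
  enabled: true
  type: badger   # "" or "bbolt" -> bbolt backend; "badger" -> badger backend
  path: tmp/0/cache
```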
// Path returns the value of "path" config parameter.
//
// Panics if the value is not a non-empty string.
@ -126,3 +149,16 @@ func (x *Config) NoSync() bool {
func (x *Config) BoltDB() *boltdbconfig.Config {
return (*boltdbconfig.Config)(x)
}
// GCInterval returns the value of "gc_interval" config parameter.
//
// Returns DefaultGCInterval if the value is not a positive duration.
func (x *Config) GCInterval() time.Duration {
d := config.DurationSafe((*config.Config)(x), "gc_interval")
if d > 0 {
return d
}
return DefaultGCInterval
}
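An illustrative fragment for the new parameter; per the code above, a missing or non-positive duration falls back to `DefaultGCInterval` (one minute), and judging by `WithGCInterval` in the earlier hunk it is consumed only by the badger backend:

```yaml
# Illustrative fragment, not taken from the repo configs.
writecache:
  type: badger
  gc_interval: 2m   # omit, or set <= 0, to get DefaultGCInterval (1m)
```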


@ -0,0 +1,23 @@
package runtime
import (
"math"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
)
const (
subsection = "runtime"
memoryLimitDefault = math.MaxInt64
)
// GCMemoryLimitBytes returns the value of "soft_memory_limit" config parameter from "runtime" section.
func GCMemoryLimitBytes(c *config.Config) int64 {
l := config.SizeInBytesSafe(c.Sub(subsection), "soft_memory_limit")
if l > 0 {
return int64(l)
}
return memoryLimitDefault
}


@ -0,0 +1,30 @@
package runtime
import (
"math"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/stretchr/testify/require"
)
func TestGCMemoryLimit(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, int64(math.MaxInt64), GCMemoryLimitBytes(empty))
})
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
require.Equal(t, int64(1073741824), GCMemoryLimitBytes(c))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}


@ -49,10 +49,15 @@
},
"sizes": {
"size_b": "1b",
"size_k": "1 k",
"size_kb": "1 kb",
"size_kb_no_space": "2kb",
"size_mb": "12m",
"size_gb": "4g",
"size_m": "12m",
"size_mb": "12mb",
"size_g": "4g",
"size_gb": "4gb",
"size_t": "5 T",
"size_tb": "5 TB",
"size_float": ".5t",
"size_float_big": "14.123 gb",


@ -42,10 +42,15 @@ bool:
incorrect: not true
sizes:
size_b: 1b
size_k: 1 k
size_kb: 1 kb
size_kb_no_space: 2kb
size_mb: 12m
size_gb: 4g
size_m: 12m
size_mb: 12mb
size_g: 4g
size_gb: 4gb
size_t: 5 T
size_tb: 5 TB
size_float: .5t
size_float_big: 14.123 gb


@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
@ -21,10 +22,19 @@ func TestTreeSection(t *testing.T) {
require.Equal(t, 0, treeSec.ReplicationChannelCapacity())
require.Equal(t, 0, treeSec.ReplicationWorkerCount())
require.Equal(t, time.Duration(0), treeSec.ReplicationTimeout())
require.Equal(t, 0, len(treeSec.AuthorizedKeys()))
})
const path = "../../../../config/example/node"
var expectedKeys keys.PublicKeys
key, err := keys.NewPublicKeyFromString("0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0")
require.NoError(t, err)
expectedKeys = append(expectedKeys, key)
key, err = keys.NewPublicKeyFromString("02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56")
require.NoError(t, err)
expectedKeys = append(expectedKeys, key)
var fileConfigTest = func(c *config.Config) {
treeSec := treeconfig.Tree(c)
@ -34,6 +44,7 @@ func TestTreeSection(t *testing.T) {
require.Equal(t, 32, treeSec.ReplicationWorkerCount())
require.Equal(t, 5*time.Second, treeSec.ReplicationTimeout())
require.Equal(t, time.Hour, treeSec.SyncInterval())
require.Equal(t, expectedKeys, treeSec.AuthorizedKeys())
}
configtest.ForEachFileType(path, fileConfigTest)


@ -69,11 +69,14 @@ func initGRPC(c *cfg) {
lis, err := net.Listen("tcp", sc.Endpoint())
if err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint())
c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
return
}
c.metricsCollector.GrpcServerMetrics().MarkHealthy(sc.Endpoint())
c.cfgGRPC.listeners = append(c.cfgGRPC.listeners, lis)
c.cfgGRPC.endpoints = append(c.cfgGRPC.endpoints, sc.Endpoint())
srv := grpc.NewServer(serverOpts...)
@ -96,21 +99,23 @@ func serveGRPC(c *cfg) {
srv := c.cfgGRPC.servers[i]
lis := c.cfgGRPC.listeners[i]
endpoint := c.cfgGRPC.endpoints[i]
go func() {
defer func() {
c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
zap.String("endpoint", lis.Addr().String()),
zap.Stringer("endpoint", lis.Addr()),
)
c.wg.Done()
}()
c.log.Info(logs.FrostFSNodeStartListeningGRPCEndpoint,
zap.String("endpoint", lis.Addr().String()),
zap.Stringer("endpoint", lis.Addr()),
)
if err := srv.Serve(lis); err != nil {
c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
fmt.Println("gRPC server error", err)
}
}()
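The `zap.String` → `zap.Stringer` swaps in this hunk are behavior-preserving; the difference is that `zap.Stringer` stores the value and defers the `String()` call until the entry is actually encoded, while `zap.String` forces the conversion up front. A tiny standalone comparison:

```go
package main

import (
	"net"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	addr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 8080}

	// Both lines log the same value; only the timing of String() differs.
	logger.Info("endpoint", zap.String("endpoint", addr.String())) // eager
	logger.Info("endpoint", zap.Stringer("endpoint", addr))        // deferred
}
```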


@ -26,3 +26,7 @@ func (it *keySpaceIterator) Next(ctx context.Context, batchSize uint32) ([]objec
it.cur = res.Cursor()
return res.AddressList(), nil
}
func (it *keySpaceIterator) Rewind() {
it.cur = nil
}
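`Rewind` drops the saved cursor so the next `Next` call starts another full pass over the key space. A hypothetical, minimal model of that contract (a slice-backed iterator, not the real engine types):

```go
package main

import "fmt"

// sliceIterator hands out batches and remembers its position; Rewind
// resets the cursor so iteration restarts from the beginning.
type sliceIterator struct {
	data []string
	cur  int
}

func (it *sliceIterator) Next(batchSize int) []string {
	if it.cur >= len(it.data) {
		return nil // key space exhausted
	}
	end := it.cur + batchSize
	if end > len(it.data) {
		end = len(it.data)
	}
	res := it.data[it.cur:end]
	it.cur = end
	return res
}

func (it *sliceIterator) Rewind() { it.cur = 0 }

func main() {
	it := &sliceIterator{data: []string{"a", "b", "c"}}
	fmt.Println(it.Next(2)) // [a b]
	it.Rewind()
	fmt.Println(it.Next(2)) // [a b] again
}
```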


@ -84,6 +84,7 @@ func initApp(ctx context.Context, c *cfg) {
c.wg.Done()
}()
setRuntimeParameters(c)
metrics, _ := metricsComponent(c)
initAndLog(c, "profiler", initProfilerService)
initAndLog(c, metrics.name, metrics.init)


@ -227,6 +227,10 @@ func bootstrapNode(c *cfg) {
c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
return
}
if c.alreadyBootstraped {
c.log.Info(logs.NetmapNodeAlreadyInCandidateListOnlineSkipInitialBootstrap)
return
}
err := c.bootstrap()
fatalOnErrDetails("bootstrap error", err)
}
@ -258,7 +262,8 @@ func initNetmapState(c *cfg) {
epoch, err := c.cfgNetmap.wrapper.Epoch()
fatalOnErrDetails("could not initialize current epoch number", err)
ni, err := c.netmapInitLocalNodeState(epoch)
var ni *netmapSDK.NodeInfo
ni, c.alreadyBootstraped, err = c.netmapInitLocalNodeState(epoch)
fatalOnErrDetails("could not init network state", err)
stateWord := nodeState(ni)
@ -277,6 +282,13 @@ func initNetmapState(c *cfg) {
c.handleLocalNodeInfo(ni)
}
func sameNodeInfo(a, b *netmapSDK.NodeInfo) bool {
// Suboptimal, but we only do this once, on node startup.
rawA := a.Marshal()
rawB := b.Marshal()
return bytes.Equal(rawA, rawB)
}
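The marshal-and-compare idiom above works for any type with a deterministic binary encoding; it allocates two buffers per call, which is why the comment flags it as acceptable only for a once-per-startup check. A standalone toy version (`nodeInfo` is a stand-in, not `netmapSDK.NodeInfo`):

```go
package main

import (
	"bytes"
	"fmt"
)

// nodeInfo stands in for any type with a canonical Marshal method.
type nodeInfo struct{ endpoint string }

func (n nodeInfo) Marshal() []byte { return []byte(n.endpoint) }

// same compares the binary encodings; correct only when Marshal is
// deterministic, and too allocation-heavy for a hot path.
func same(a, b nodeInfo) bool {
	return bytes.Equal(a.Marshal(), b.Marshal())
}

func main() {
	fmt.Println(same(nodeInfo{"10.0.0.1:8080"}, nodeInfo{"10.0.0.1:8080"})) // true
}
```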
func nodeState(ni *netmapSDK.NodeInfo) string {
if ni != nil {
switch {
@ -291,27 +303,29 @@ func nodeState(ni *netmapSDK.NodeInfo) string {
return "undefined"
}
func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, bool, error) {
nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
if err != nil {
return nil, err
return nil, false, err
}
var candidate *netmapSDK.NodeInfo
alreadyBootstraped := false
for i := range nmNodes {
if bytes.Equal(nmNodes[i].PublicKey(), c.binPublicKey) {
candidate = &nmNodes[i]
alreadyBootstraped = candidate.IsOnline() && sameNodeInfo(&c.cfgNodeInfo.localInfo, candidate)
break
}
}
node, err := c.netmapLocalNodeState(epoch)
if err != nil {
return nil, err
return nil, false, err
}
if candidate == nil {
return node, nil
return node, false, nil
}
nmState := nodeState(node)
@ -319,11 +333,11 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error
if nmState != candidateState {
// This happens when the node was switched to maintenance without epoch tick.
// We expect it to continue staying in maintenance.
c.log.Info("candidate status is different from the netmap status, the former takes priority",
c.log.Info(logs.CandidateStatusPriority,
zap.String("netmap", nmState),
zap.String("candidate", candidateState))
}
return candidate, nil
return candidate, alreadyBootstraped, nil
}
func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
@ -463,8 +477,6 @@ func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
ni.SetMsPerBlock(msPerBlock)
ni.SetMaxObjectSize(netInfoMorph.MaxObjectSize)
ni.SetStoragePrice(netInfoMorph.StoragePrice)
ni.SetAuditFee(netInfoMorph.AuditFee)
ni.SetEpochDuration(netInfoMorph.EpochDuration)
ni.SetContainerFee(netInfoMorph.ContainerFee)
ni.SetNamedContainerFee(netInfoMorph.ContainerAliasFee)


@ -70,6 +70,10 @@ func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
return s.put.Put()
}
func (s *objectSvc) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
return s.put.PutSingle(ctx, req)
}
func (s *objectSvc) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
return s.get.Head(ctx, req)
}
@ -255,7 +259,6 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
}),
policer.WithMaxCapacity(c.cfgObject.pool.replicatorPoolSize),
policer.WithPool(c.cfgObject.pool.replication),
policer.WithNodeLoader(c),
)
c.workers = append(c.workers, worker{
@ -309,48 +312,40 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage) *putsvc.Service {
}
return putsvc.NewService(
putsvc.WithKeyStorage(keyStorage),
putsvc.WithClientConstructor(c.putClientCache),
putsvc.WithMaxSizeSource(newCachedMaxObjectSizeSource(c)),
putsvc.WithObjectStorage(os),
putsvc.WithContainerSource(c.cfgObject.cnrSource),
putsvc.WithNetworkMapSource(c.netMapSource),
putsvc.WithNetmapKeys(c),
putsvc.WithNetworkState(c.cfgNetmap.state),
keyStorage,
c.putClientCache,
newCachedMaxObjectSizeSource(c),
os,
c.cfgObject.cnrSource,
c.netMapSource,
c,
c.cfgNetmap.state,
putsvc.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
putsvc.WithLogger(c.log),
)
}
func createPutSvcV2(sPut *putsvc.Service, keyStorage *util.KeyStorage) *putsvcV2.Service {
return putsvcV2.NewService(
putsvcV2.WithInternalService(sPut),
putsvcV2.WithKeyStorage(keyStorage),
)
return putsvcV2.NewService(sPut, keyStorage)
}
func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
return searchsvc.New(
searchsvc.WithLogger(c.log),
searchsvc.WithLocalStorageEngine(ls),
searchsvc.WithClientConstructor(coreConstructor),
searchsvc.WithTraverserGenerator(
traverseGen.WithTraverseOptions(
placement.WithoutSuccessTracking(),
),
ls,
coreConstructor,
traverseGen.WithTraverseOptions(
placement.WithoutSuccessTracking(),
),
searchsvc.WithNetMapSource(c.netMapSource),
searchsvc.WithKeyStorage(keyStorage),
c.netMapSource,
keyStorage,
searchsvc.WithLogger(c.log),
)
}
func createSearchSvcV2(sSearch *searchsvc.Service, keyStorage *util.KeyStorage) *searchsvcV2.Service {
return searchsvcV2.NewService(
searchsvcV2.WithInternalService(sSearch),
searchsvcV2.WithKeyStorage(keyStorage),
)
return searchsvcV2.NewService(sSearch, keyStorage)
}
func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator,
@ -378,24 +373,22 @@ func createGetServiceV2(sGet *getsvc.Service, keyStorage *util.KeyStorage) *gets
func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Service,
sSearch *searchsvc.Service, sPut *putsvc.Service) *deletesvc.Service {
return deletesvc.New(
deletesvc.WithLogger(c.log),
deletesvc.WithHeadService(sGet),
deletesvc.WithSearchService(sSearch),
deletesvc.WithPutService(sPut),
deletesvc.WithNetworkInfo(&delNetInfo{
sGet,
sSearch,
sPut,
&delNetInfo{
State: c.cfgNetmap.state,
tsLifetime: c.cfgObject.tombstoneLifetime,
cfg: c,
}),
deletesvc.WithKeyStorage(keyStorage),
},
keyStorage,
deletesvc.WithLogger(c.log),
)
}
func createDeleteServiceV2(sDelete *deletesvc.Service) *deletesvcV2.Service {
return deletesvcV2.NewService(
deletesvcV2.WithInternalService(sDelete),
)
return deletesvcV2.NewService(sDelete)
}
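Several constructors in this hunk move required dependencies out of functional options into positional parameters, so a missing dependency becomes a compile error instead of a nil field at runtime, while genuinely optional knobs (logger, worker pools) stay variadic. A generic sketch of that split, with illustrative names rather than the real frostfs-node services:

```go
package main

import "fmt"

type Storage interface{ Get(key string) (string, bool) }

type Logger interface{ Info(msg string) }

type stdLogger struct{}

func (stdLogger) Info(msg string) { fmt.Println("INFO:", msg) }

type mapStorage map[string]string

func (m mapStorage) Get(k string) (string, bool) { v, ok := m[k]; return v, ok }

// Service cannot work without a Storage, so it is positional; the logger
// has a sane default, so it stays a functional option.
type Service struct {
	store  Storage
	logger Logger
}

type Option func(*Service)

func WithLogger(l Logger) Option { return func(s *Service) { s.logger = l } }

func New(store Storage, opts ...Option) *Service {
	s := &Service{store: store, logger: stdLogger{}}
	for _, o := range opts {
		o(s)
	}
	return s
}

func main() {
	svc := New(mapStorage{"a": "1"}) // omitting the store would not compile
	v, _ := svc.store.Get("a")
	svc.logger.Info("got " + v)
}
```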
func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Service,
@ -417,21 +410,16 @@ func createACLServiceV2(c *cfg, splitSvc *objectService.TransportSplitter) v2.Se
irFetcher := createInnerRingFetcher(c)
return v2.New(
splitSvc,
c.netMapSource,
newCachedIRFetcher(irFetcher),
acl.NewChecker(
c.cfgNetmap.state,
c.cfgObject.eaclSource,
eaclSDK.NewValidator(),
ls),
c.cfgObject.cnrSource,
v2.WithLogger(c.log),
v2.WithIRFetcher(newCachedIRFetcher(irFetcher)),
v2.WithNetmapSource(c.netMapSource),
v2.WithContainerSource(
c.cfgObject.cnrSource,
),
v2.WithNextService(splitSvc),
v2.WithEACLChecker(
acl.NewChecker(new(acl.CheckerPrm).
SetNetmapState(c.cfgNetmap.state).
SetEACLSource(c.cfgObject.eaclSource).
SetValidator(eaclSDK.NewValidator()).
SetLocalStorage(ls),
),
),
)
}


@ -0,0 +1,26 @@
package main
import (
"os"
"runtime/debug"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/runtime"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
func setRuntimeParameters(c *cfg) {
if len(os.Getenv("GOMEMLIMIT")) != 0 {
// default limit < yaml limit < app env limit < GOMEMLIMIT
c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
return
}
memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg)
previous := debug.SetMemoryLimit(memLimitBytes)
if memLimitBytes != previous {
c.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
zap.Int64("new_value", memLimitBytes),
zap.Int64("old_value", previous))
}
}
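`debug.SetMemoryLimit` both sets the new soft limit and returns the previous one, which is exactly what the update log above relies on; a negative input reports the current limit without changing it (Go 1.19+). A small standalone probe:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// A negative input does not adjust the limit; it only reports the
	// currently set soft memory limit.
	fmt.Println("current limit:", debug.SetMemoryLimit(-1))

	// Setting a new limit returns the previous one; the node code above
	// compares the two to decide whether to log an update.
	prev := debug.SetMemoryLimit(2 << 30) // 2 GiB
	fmt.Println("previous limit:", prev)
}
```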


@ -36,6 +36,7 @@ FROSTFS_TREE_REPLICATION_CHANNEL_CAPACITY=32
FROSTFS_TREE_REPLICATION_WORKER_COUNT=32
FROSTFS_TREE_REPLICATION_TIMEOUT=5s
FROSTFS_TREE_SYNC_INTERVAL=1h
FROSTFS_TREE_AUTHORIZED_KEYS="0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56"
# gRPC section
## 0 server
@ -187,3 +188,5 @@ FROSTFS_STORAGE_SHARD_1_GC_REMOVER_SLEEP_INTERVAL=5m
FROSTFS_TRACING_ENABLED=true
FROSTFS_TRACING_ENDPOINT="localhost"
FROSTFS_TRACING_EXPORTER="otlp_grpc"
FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824


@ -75,7 +75,11 @@
"replication_channel_capacity": 32,
"replication_worker_count": 32,
"replication_timeout": "5s",
"sync_interval": "1h"
"sync_interval": "1h",
"authorized_keys": [
"0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0",
"02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56"
]
},
"control": {
"authorized_keys": [
@ -190,6 +194,7 @@
"resync_metabase": true,
"writecache": {
"enabled": true,
"type": "bbolt",
"path": "tmp/1/cache",
"memcache_capacity": 2147483648,
"small_object_size": 16384,
@ -241,5 +246,8 @@
"enabled": true,
"endpoint": "localhost:9090",
"exporter": "otlp_grpc"
},
"runtime": {
"soft_memory_limit": 1073741824
}
}


@ -62,6 +62,9 @@ tree:
replication_channel_capacity: 32
replication_timeout: 5s
sync_interval: 1h
authorized_keys: # list of hex-encoded public keys that have rights to use the Tree Service with frostfs-cli
- 0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0
- 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56
control:
authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
@ -119,6 +122,7 @@ storage:
writecache:
enabled: true
type: bbolt
small_object_size: 16k # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes
max_object_size: 134217728 # size threshold for "big" objects which bypass write-cache and go to the storage directly, bytes
workers_number: 30 # number of write-cache flusher threads
@ -214,3 +218,6 @@ tracing:
enabled: true
exporter: "otlp_grpc"
endpoint: "localhost"
runtime:
soft_memory_limit: 1gb

debian/rules

@ -12,8 +12,8 @@ override_dh_auto_install:
echo $(DEB_BUILD_OPTIONS)
dh_auto_install
bin/frostfs-adm gendoc -t man man/
bin/frostfs-cli gendoc -t man man/
bin/frostfs-adm gendoc --type man man/
bin/frostfs-cli gendoc --type man man/
bin/frostfs-adm completion bash > debian/frostfs-adm.bash-completion
bin/frostfs-cli completion bash > debian/frostfs-cli.bash-completion


@ -24,6 +24,7 @@ There are some custom types used for brevity:
| `policer` | [Policer service configuration](#policer-section) |
| `replicator` | [Replicator service configuration](#replicator-section) |
| `storage` | [Storage engine configuration](#storage-section) |
| `runtime` | [Runtime configuration](#runtime-section) |
# `control` section
@ -272,6 +273,7 @@ metabase:
```yaml
writecache:
enabled: true
type: bbolt
path: /path/to/writecache
capacity: 4294967296
small_object_size: 16384
@ -281,6 +283,7 @@ writecache:
| Parameter | Type | Default value | Description |
|----------------------|------------|---------------|----------------------------------------------------------------------------------------------------------------------|
| `type` | `string` | | Type of write cache backing implementation to use (`bbolt`, `badger`). |
| `path` | `string` | | Path to the write-cache directory. |
| `capacity` | `size` | unrestricted | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
| `small_object_size` | `size` | `32K` | Maximum object size for "small" objects. These objects are stored in a key-value database instead of the file system. |
@ -426,3 +429,15 @@ object:
| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
| `put.pool_size_remote` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
| `put.pool_size_local` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. |
# `runtime` section
Contains runtime parameters.
```yaml
runtime:
soft_memory_limit: 1GB
```
| Parameter | Type | Default value | Description |
|---------------------|--------|---------------|--------------------------------------------------------------------------|
| `soft_memory_limit` | `size` | 0 | Soft memory limit for the runtime. Zero or no value stands for no limit. If the `GOMEMLIMIT` environment variable is set, the value from the configuration file is ignored. |

go.mod

@ -1,12 +1,12 @@
module git.frostfs.info/TrueCloudLab/frostfs-node
go 1.19
go 1.20
require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230602142716-68021b910acb
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230627134746-36f3d39c406a
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230628121302-5d62cef27e6c
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230809065235-d48788c7a946
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
github.com/cheggaaa/pb v1.0.29
@ -41,6 +41,17 @@ require (
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/dgraph-io/ristretto v0.1.1 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.1.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/flatbuffers v1.12.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
go.opencensus.io v0.24.0 // indirect
)
require (
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
@ -55,6 +66,7 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/dgraph-io/badger/v4 v4.1.0
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect

go.sum (binary file; contents not shown)

@ -52,7 +52,6 @@ const (
PolicerRoutineStopped = "routine stopped" // Info in ../node/pkg/services/policer/process.go
PolicerFailureAtObjectSelectForReplication = "failure at object select for replication" // Warn in ../node/pkg/services/policer/process.go
PolicerPoolSubmission = "pool submission" // Warn in ../node/pkg/services/policer/process.go
PolicerTuneReplicationCapacity = "tune replication capacity" // Debug in ../node/pkg/services/policer/process.go
ReplicatorFinishWork = "finish work" // Debug in ../node/pkg/services/replicator/process.go
ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage" // Error in ../node/pkg/services/replicator/process.go
ReplicatorCouldNotReplicateObject = "could not replicate object" // Error in ../node/pkg/services/replicator/process.go
@ -292,191 +291,193 @@ const (
ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" // Debug in ../node/pkg/local_object_storage/shard/inhume.go
WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache" // Debug in ../node/pkg/local_object_storage/writecache/flush.go
WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" // Info in ../node/pkg/local_object_storage/writecache/mode.go
WritecacheFillingFlushMarksForObjectsInFSTree = "filling flush marks for objects in FSTree" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFinishedUpdatingFSTreeFlushMarks = "finished updating FSTree flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFillingFlushMarksForObjectsInDatabase = "filling flush marks for objects in database" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFinishedUpdatingFlushMarks = "finished updating flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheCantParseAddress = "can't parse address" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" // Error in ../node/pkg/local_object_storage/writecache/storage.go
BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza = "could not read payload range from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza = "could not read payload range from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza" // Error in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict = "blobovnicza successfully closed on evict" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeUpdatingActiveBlobovnicza = "updating active blobovnicza..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated = "active blobovnicza successfully updated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeBlobovniczaSuccessfullyActivated = "blobovnicza successfully activated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza = "could not remove object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza = "could not remove object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotUpdateActiveBlobovnicza = "could not update active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza = "could not read object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza = "could not get object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
BlobovniczatreeInitializingBlobovniczas = "initializing Blobovnicza's" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeCouldNotCloseActiveBlobovnicza = "could not close active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
AlphabetTick = "tick" // Info in ../node/pkg/innerring/processors/alphabet/handlers.go
AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained" // Warn in ../node/pkg/innerring/processors/alphabet/handlers.go
AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetStorageNodeEmissionIsOff = "storage node emission is off" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetGasEmission = "gas emission" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantParseNodePublicKey = "can't parse node public key" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantTransferGas = "can't transfer gas" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantTransferGasToWallet = "can't transfer gas to wallet" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetAlphabetWorkerPool = "alphabet worker pool" // Debug in ../node/pkg/innerring/processors/alphabet/processor.go
BalanceBalanceWorkerPoolDrained = "balance worker pool drained" // Warn in ../node/pkg/innerring/processors/balance/handlers.go
BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock" // Info in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceCantSendLockAssetTx = "can't send lock asset tx" // Error in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceBalanceWorkerPool = "balance worker pool" // Debug in ../node/pkg/innerring/processors/balance/processor.go
ContainerContainerWorkerPool = "container worker pool" // Debug in ../node/pkg/innerring/processors/container/processor.go
ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put" // Info in ../node/pkg/innerring/processors/container/process_container.go
ContainerPutContainerCheckFailed = "put container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerCouldNotApprovePutContainer = "could not approve put container" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete" // Info in ../node/pkg/innerring/processors/container/process_container.go
ContainerDeleteContainerCheckFailed = "delete container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerCouldNotApproveDeleteContainer = "could not approve delete container" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL" // Info in ../node/pkg/innerring/processors/container/process_eacl.go
ContainerSetEACLCheckFailed = "set EACL check failed" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
ContainerCouldNotApproveSetEACL = "could not approve set EACL" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
FrostFSNonAlphabetModeIgnoreBind = "non alphabet mode, ignore bind" // Info in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostFSInvalidManageKeyEvent = "invalid manage key event" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostFSCouldNotDecodeScriptHashFromBytes = "could not decode script hash from bytes" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" // Info in ../node/pkg/innerring/processors/frostfs/process_config.go
FrostFSCantRelaySetConfigEvent = "can't relay set config event" // Error in ../node/pkg/innerring/processors/frostfs/process_config.go
FrostFSFrostfsWorkerPool = "frostfs worker pool" // Debug in ../node/pkg/innerring/processors/frostfs/processor.go
FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSDoubleMintEmissionDeclined = "double mint emission declined" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantCreateLockAccount = "can't create lock account" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
GovernanceNewEvent = "new event" // Info in ../node/pkg/innerring/processors/governance/handlers.go
GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained" // Warn in ../node/pkg/innerring/processors/governance/handlers.go
GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceFinishedAlphabetListUpdate = "finished alphabet list update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceUpdateOfTheInnerRingList = "update of the inner ring list" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract" // Error in ../node/pkg/innerring/processors/governance/process_update.go
NetmapNetmapWorkerPool = "netmap worker pool" // Debug in ../node/pkg/innerring/processors/netmap/processor.go
NetmapTick = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapWorkerPoolDrained = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" // Debug in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantGetEpochDuration = "can't get epoch duration" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantGetTransactionHeight = "can't get transaction height" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantResetEpochTimer = "can't reset epoch timer" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantStartContainerSizeEstimation = "can't start container size estimation" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick" // Info in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNextEpoch = "next epoch" // Debug in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch" // Error in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapNonhaltNotaryTransaction = "non-halt notary transaction" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantParseNetworkMapCandidate = "can't parse network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapApprovingNetworkMapCandidate = "approving network map candidate" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
FrostFSIRInternalError = "internal error" // Info in ../node/cmd/frostfs-ir/main.go
FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server" // Debug in ../node/cmd/frostfs-ir/main.go
FrostFSIRApplicationStopped = "application stopped" // Info in ../node/cmd/frostfs-ir/main.go
FrostFSIRCouldntCreateRPCClientForEndpoint = "could not create RPC client for endpoint" // Debug in ../node/pkg/morph/client/constructor.go
FrostFSIRCreatedRPCClientForEndpoint = "created RPC client for endpoint" // Info in ../node/pkg/morph/client/constructor.go
FrostFSIRReloadExtraWallets = "reload extra wallets" // Info in ../node/cmd/frostfs-ir/config.go
FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file" // Error in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint" // Error in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStoppingGRPCServer = "stopping gRPC server..." // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go
FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeShardAttachedToEngine = "shard attached to engine" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeInternalApplicationError = "internal application error" // Warn in ../node/cmd/frostfs-node/config.go
FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeConfigurationReading = "configuration reading" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeTracingConfigationUpdated = "tracing configuration updated" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification" // Error in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications" // Debug in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage" // Warn in ../node/cmd/frostfs-node/object.go
FrostFSNodeFailedInitTracing = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go
FrostFSNodeFailedShutdownTracing = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go
FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeClosingMorphComponents = "closing morph components..." // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNotarySupport = "notary support" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network" // Debug in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNewBlock = "new block" // Debug in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx" // Warn in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch" // Error in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit" // Error in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeInitialNetworkState = "initial network state" // Info in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization" // Info in ../node/cmd/frostfs-node/tree.go
FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go
FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container" // Debug in ../node/cmd/frostfs-node/tree.go
FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go
FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go
WritecacheBadgerInitExperimental = "initializing badger-backed experimental writecache"
WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache" // Debug in ../node/pkg/local_object_storage/writecache/flush.go
WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" // Info in ../node/pkg/local_object_storage/writecache/mode.go
WritecacheFillingFlushMarksForObjectsInFSTree = "filling flush marks for objects in FSTree" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFinishedUpdatingFSTreeFlushMarks = "finished updating FSTree flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFillingFlushMarksForObjectsInDatabase = "filling flush marks for objects in database" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFinishedUpdatingFlushMarks = "finished updating flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheCantParseAddress = "can't parse address" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheDBValueLogGCRunCompleted = "value log GC run completed"
BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza = "could not read payload range from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza = "could not read payload range from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza" // Error in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict = "blobovnicza successfully closed on evict" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeUpdatingActiveBlobovnicza = "updating active blobovnicza..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated = "active blobovnicza successfully updated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeBlobovniczaSuccessfullyActivated = "blobovnicza successfully activated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza = "could not remove object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza = "could not remove object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotUpdateActiveBlobovnicza = "could not update active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza = "could not read object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza = "could not get object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
BlobovniczatreeInitializingBlobovniczas = "initializing Blobovnicza's" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeCouldNotCloseActiveBlobovnicza = "could not close active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
AlphabetTick = "tick" // Info in ../node/pkg/innerring/processors/alphabet/handlers.go
AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained" // Warn in ../node/pkg/innerring/processors/alphabet/handlers.go
AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetStorageNodeEmissionIsOff = "storage node emission is off" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetGasEmission = "gas emission" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantParseNodePublicKey = "can't parse node public key" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantTransferGas = "can't transfer gas" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantTransferGasToWallet = "can't transfer gas to wallet" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetAlphabetWorkerPool = "alphabet worker pool" // Debug in ../node/pkg/innerring/processors/alphabet/processor.go
BalanceBalanceWorkerPoolDrained = "balance worker pool drained" // Warn in ../node/pkg/innerring/processors/balance/handlers.go
BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock" // Info in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceCantSendLockAssetTx = "can't send lock asset tx" // Error in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceBalanceWorkerPool = "balance worker pool" // Debug in ../node/pkg/innerring/processors/balance/processor.go
ContainerContainerWorkerPool = "container worker pool" // Debug in ../node/pkg/innerring/processors/container/processor.go
ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put" // Info in ../node/pkg/innerring/processors/container/process_container.go
ContainerPutContainerCheckFailed = "put container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerCouldNotApprovePutContainer = "could not approve put container" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete" // Info in ../node/pkg/innerring/processors/container/process_container.go
ContainerDeleteContainerCheckFailed = "delete container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerCouldNotApproveDeleteContainer = "could not approve delete container" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL" // Info in ../node/pkg/innerring/processors/container/process_eacl.go
ContainerSetEACLCheckFailed = "set EACL check failed" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
ContainerCouldNotApproveSetEACL = "could not approve set EACL" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
FrostFSNonAlphabetModeIgnoreBind = "non alphabet mode, ignore bind" // Info in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostFSInvalidManageKeyEvent = "invalid manage key event" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostFSCouldNotDecodeScriptHashFromBytes = "could not decode script hash from bytes" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" // Info in ../node/pkg/innerring/processors/frostfs/process_config.go
FrostFSCantRelaySetConfigEvent = "can't relay set config event" // Error in ../node/pkg/innerring/processors/frostfs/process_config.go
FrostFSFrostfsWorkerPool = "frostfs worker pool" // Debug in ../node/pkg/innerring/processors/frostfs/processor.go
FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSDoubleMintEmissionDeclined = "double mint emission declined" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantCreateLockAccount = "can't create lock account" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
GovernanceNewEvent = "new event" // Info in ../node/pkg/innerring/processors/governance/handlers.go
GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained" // Warn in ../node/pkg/innerring/processors/governance/handlers.go
GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceFinishedAlphabetListUpdate = "finished alphabet list update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceUpdateOfTheInnerRingList = "update of the inner ring list" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract" // Error in ../node/pkg/innerring/processors/governance/process_update.go
NetmapNetmapWorkerPool = "netmap worker pool" // Debug in ../node/pkg/innerring/processors/netmap/processor.go
NetmapTick = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapWorkerPoolDrained = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" // Debug in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantGetEpochDuration = "can't get epoch duration" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantGetTransactionHeight = "can't get transaction height" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantResetEpochTimer = "can't reset epoch timer" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantStartContainerSizeEstimation = "can't start container size estimation" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick" // Info in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNextEpoch = "next epoch" // Debug in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch" // Error in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapNonhaltNotaryTransaction = "non-halt notary transaction" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantParseNetworkMapCandidate = "can't parse network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapApprovingNetworkMapCandidate = "approving network map candidate" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
FrostFSIRInternalError = "internal error" // Info in ../node/cmd/frostfs-ir/main.go
FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server" // Debug in ../node/cmd/frostfs-ir/main.go
FrostFSIRApplicationStopped = "application stopped" // Info in ../node/cmd/frostfs-ir/main.go
FrostFSIRCouldntCreateRPCClientForEndpoint = "could not create RPC client for endpoint" // Debug in ../node/pkg/morph/client/constructor.go
FrostFSIRCreatedRPCClientForEndpoint = "created RPC client for endpoint" // Info in ../node/pkg/morph/client/constructor.go
FrostFSIRReloadExtraWallets = "reload extra wallets" // Info in ../node/cmd/frostfs-ir/config.go
FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file" // Error in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint" // Error in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStoppingGRPCServer = "stopping gRPC server..." // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go
FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeShardAttachedToEngine = "shard attached to engine" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeInternalApplicationError = "internal application error" // Warn in ../node/cmd/frostfs-node/config.go
FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeConfigurationReading = "configuration reading" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeTracingConfigurationUpdated = "tracing configuration updated" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification" // Error in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications" // Debug in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume (mark redundant copy as garbage)" // Warn in ../node/cmd/frostfs-node/object.go
FrostFSNodeFailedInitTracing = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go
FrostFSNodeFailedShutdownTracing = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go
FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeClosingMorphComponents = "closing morph components..." // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNotarySupport = "notary support" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network" // Debug in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNewBlock = "new block" // Debug in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx" // Warn in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch" // Error in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit" // Error in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeInitialNetworkState = "initial network state" // Info in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization" // Info in ../node/cmd/frostfs-node/tree.go
FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go
FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container" // Debug in ../node/cmd/frostfs-node/tree.go
FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go
FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go
FrostFSNodePolicerIsDisabled = "policer is disabled"
CommonApplicationStarted = "application started"
ShardGCCollectingExpiredObjectsStarted = "collecting expired objects started"
@ -493,4 +494,24 @@ const (
ShardDeleteCantDeleteFromWriteCache = "can't delete object from write cache"
FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap = "the node is under maintenance, skip initial bootstrap"
EngineCouldNotChangeShardModeToDisabled = "could not change shard mode to disabled"
NetmapNodeAlreadyInCandidateListOnlineSkipInitialBootstrap = "the node is already in candidate list with online state, skip initial bootstrap"
RPCConnectionLost = "RPC connection lost, attempting reconnect"
RPCNodeSwitchFailure = "can't switch RPC node"
FSTreeCantReadFile = "can't read a file"
FSTreeCantUnmarshalObject = "can't unmarshal an object"
FSTreeCantFlushObjectBlobstor = "can't flush an object to blobstor"
FSTreeCantUpdateID = "can't update object storage ID"
FSTreeCantDecodeDBObjectAddress = "can't decode object address from the DB"
PutSingleRedirectFailure = "failed to redirect PutSingle request"
StorageIDRetrievalFailure = "can't get storage ID from metabase"
ObjectRemovalFailureBlobStor = "can't remove object from blobStor"
CandidateStatusPriority = "candidate status is different from the netmap status, the former takes priority"
TombstoneExpirationParseFailure = "tombstone getter: could not parse tombstone expiration epoch"
FrostFSNodeCantUpdateObjectStorageID = "can't update object storage ID"
FrostFSNodeCantFlushObjectToBlobstor = "can't flush an object to blobstor"
FrostFSNodeCantDecodeObjectAddressFromDB = "can't decode object address from the DB" // Error in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantUnmarshalObjectFromDB = "can't unmarshal an object from the DB" // Error in ../node/cmd/frostfs-node/morph.go
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
FailedToCountWritecacheItems = "failed to count writecache items"
)
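
The constant block above is the repository's central catalogue of log messages; the trailing comments record the level and call site of each message. As a minimal sketch of how one of these constants is consumed, assuming the catalogue lives in an internal logs package (the import path below is inferred, not shown in this diff):

```go
package main

import (
	"go.uber.org/zap"

	// Assumed import path; the diff shows only the constants, not the package.
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
)

func main() {
	log := zap.L()
	// Referencing a shared constant keeps the message text identical at every
	// call site, so operators can grep or alert on a stable string.
	log.Info(logs.CommonApplicationStarted, zap.String("version", "dev"))
}
```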


@ -13,6 +13,7 @@ import (
type Client interface {
ContainerAnnounceUsedSpace(context.Context, client.PrmAnnounceSpace) (*client.ResAnnounceSpace, error)
ObjectPutInit(context.Context, client.PrmObjectPutInit) (client.ObjectWriter, error)
ObjectPutSingle(context.Context, client.PrmObjectPutSingle) (*client.ResObjectPutSingle, error)
ObjectDelete(context.Context, client.PrmObjectDelete) (*client.ResObjectDelete, error)
ObjectGetInit(context.Context, client.PrmObjectGet) (*client.ObjectReader, error)
ObjectHead(context.Context, client.PrmObjectHead) (*client.ResObjectHead, error)

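The hunk above extends the Client interface with ObjectPutSingle. A sketch of a call through the interface, relying only on the signature added here (the helper name putSingle is hypothetical, and the surrounding file is assumed to already import context and the SDK client package):

```go
// putSingle forwards a single-shot put through the extended Client interface.
// Only the method signature from the hunk above is relied upon.
func putSingle(ctx context.Context, c Client, prm client.PrmObjectPutSingle) error {
	_, err := c.ObjectPutSingle(ctx, prm)
	return err
}
```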

@ -1,9 +1,6 @@
package container
import (
"errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
@ -37,12 +34,6 @@ type Source interface {
Get(cid.ID) (*Container, error)
}
// IsErrNotFound checks if the error returned by Source.Get corresponds
// to the missing container.
func IsErrNotFound(err error) bool {
return errors.As(err, new(apistatus.ContainerNotFound))
}
// EACL groups information about the FrostFS container's extended ACL stored in
// the FrostFS network.
type EACL struct {

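The removed IsErrNotFound helper was a one-line errors.As check, so callers are presumably expected to inline it. A sketch reproducing the deleted body (the helper name containerNotFound is hypothetical):

```go
package container

import (
	"errors"

	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)

// containerNotFound reproduces the body of the removed IsErrNotFound.
// errors.As needs a pointer target, hence new(apistatus.ContainerNotFound).
func containerNotFound(err error) bool {
	return errors.As(err, new(apistatus.ContainerNotFound))
}
```

The same pointer-target convention shows up later in this diff, where a test stub returns new(apistatus.ContainerNotFound) instead of a struct value.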

@ -1,7 +1,7 @@
package object
import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@ -9,5 +9,5 @@ import (
// object type.
type AddressWithType struct {
Address oid.Address
Type object.Type
Type objectSDK.Type
}
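
The rename here appears to exist because the importing package is itself named object: aliasing the SDK import as objectSDK removes the collision between the package name and the SDK identifiers, and the larger validator diff below applies the same alias consistently.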


@ -12,7 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
@ -64,6 +64,8 @@ var errNoExpirationEpoch = errors.New("missing expiration epoch attribute")
var errTombstoneExpiration = errors.New("tombstone body and header contain different expiration values")
var errMissingSignature = errors.New("missing signature")
func defaultCfg() *cfg {
return new(cfg)
}
@ -87,7 +89,7 @@ func NewFormatValidator(opts ...FormatValidatorOption) *FormatValidator {
// If unprepared is true, only fields set by user are validated.
//
// Returns nil error if the object has valid structure.
func (v *FormatValidator) Validate(ctx context.Context, obj *object.Object, unprepared bool) error {
func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, unprepared bool) error {
if obj == nil {
return errNilObject
}
@ -119,7 +121,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *object.Object, unpr
return fmt.Errorf("object did not pass expiration check: %w", err)
}
if err := object.CheckHeaderVerificationFields(obj); err != nil {
if err := objectSDK.CheckHeaderVerificationFields(obj); err != nil {
return fmt.Errorf("(%T) could not validate header fields: %w", v, err)
}
}
@ -132,11 +134,10 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *object.Object, unpr
return nil
}
func (v *FormatValidator) validateSignatureKey(obj *object.Object) error {
func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
sig := obj.Signature()
if sig == nil {
// TODO(@cthulhu-rider): #468 use "const" error
return errors.New("missing signature")
return errMissingSignature
}
var sigV2 refs.Signature
@ -157,8 +158,6 @@ func (v *FormatValidator) validateSignatureKey(obj *object.Object) error {
return v.checkOwnerKey(*obj.OwnerID(), key)
}
// FIXME: #1159 perform token verification
return nil
}
@ -178,13 +177,13 @@ func (v *FormatValidator) checkOwnerKey(id user.ID, key frostfsecdsa.PublicKey)
// - object.TypeTombstone;
// - object.TypeLock.
type ContentMeta struct {
typ object.Type
typ objectSDK.Type
objs []oid.ID
}
// Type returns object's type.
func (i ContentMeta) Type() object.Type {
func (i ContentMeta) Type() objectSDK.Type {
return i.typ
}
@ -197,17 +196,17 @@ func (i ContentMeta) Objects() []oid.ID {
}
// ValidateContent validates payload content according to the object type.
func (v *FormatValidator) ValidateContent(o *object.Object) (ContentMeta, error) {
func (v *FormatValidator) ValidateContent(o *objectSDK.Object) (ContentMeta, error) {
meta := ContentMeta{
typ: o.Type(),
}
switch o.Type() {
case object.TypeTombstone:
case objectSDK.TypeTombstone:
if err := v.fillAndValidateTombstoneMeta(o, &meta); err != nil {
return ContentMeta{}, err
}
case object.TypeLock:
case objectSDK.TypeLock:
if err := v.fillAndValidateLockMeta(o, &meta); err != nil {
return ContentMeta{}, err
}
@ -218,7 +217,7 @@ func (v *FormatValidator) ValidateContent(o *object.Object) (ContentMeta, error)
return meta, nil
}
func (v *FormatValidator) fillAndValidateLockMeta(o *object.Object, meta *ContentMeta) error {
func (v *FormatValidator) fillAndValidateLockMeta(o *objectSDK.Object, meta *ContentMeta) error {
if len(o.Payload()) == 0 {
return errors.New("empty payload in lock")
}
@ -240,7 +239,7 @@ func (v *FormatValidator) fillAndValidateLockMeta(o *object.Object, meta *Conten
return fmt.Errorf("lock object expiration: %d; current: %d", lockExp, currEpoch)
}
var lock object.Lock
var lock objectSDK.Lock
if err = lock.Unmarshal(o.Payload()); err != nil {
return fmt.Errorf("decode lock payload: %w", err)
@ -256,12 +255,12 @@ func (v *FormatValidator) fillAndValidateLockMeta(o *object.Object, meta *Conten
return nil
}
func (v *FormatValidator) fillAndValidateTombstoneMeta(o *object.Object, meta *ContentMeta) error {
func (v *FormatValidator) fillAndValidateTombstoneMeta(o *objectSDK.Object, meta *ContentMeta) error {
if len(o.Payload()) == 0 {
return fmt.Errorf("(%T) empty payload in tombstone", v)
}
tombstone := object.NewTombstone()
tombstone := objectSDK.NewTombstone()
if err := tombstone.Unmarshal(o.Payload()); err != nil {
return fmt.Errorf("(%T) could not unmarshal tombstone content: %w", v, err)
@ -287,7 +286,7 @@ func (v *FormatValidator) fillAndValidateTombstoneMeta(o *object.Object, meta *C
var errExpired = errors.New("object has expired")
func (v *FormatValidator) checkExpiration(ctx context.Context, obj *object.Object) error {
func (v *FormatValidator) checkExpiration(ctx context.Context, obj *objectSDK.Object) error {
exp, err := expirationEpochAttribute(obj)
if err != nil {
if errors.Is(err, errNoExpirationEpoch) {
@ -321,7 +320,7 @@ func (v *FormatValidator) checkExpiration(ctx context.Context, obj *object.Objec
return nil
}
func expirationEpochAttribute(obj *object.Object) (uint64, error) {
func expirationEpochAttribute(obj *objectSDK.Object) (uint64, error) {
for _, a := range obj.Attributes() {
if a.Key() != objectV2.SysAttributeExpEpoch && a.Key() != objectV2.SysAttributeExpEpochNeoFS {
continue
@ -338,7 +337,7 @@ var (
errEmptyAttrVal = errors.New("empty attribute value")
)
func (v *FormatValidator) checkAttributes(obj *object.Object) error {
func (v *FormatValidator) checkAttributes(obj *objectSDK.Object) error {
as := obj.Attributes()
mUnique := make(map[string]struct{}, len(as))
@ -362,7 +361,7 @@ func (v *FormatValidator) checkAttributes(obj *object.Object) error {
var errIncorrectOwner = errors.New("incorrect object owner")
func (v *FormatValidator) checkOwner(obj *object.Object) error {
func (v *FormatValidator) checkOwner(obj *objectSDK.Object) error {
if idOwner := obj.OwnerID(); idOwner == nil || len(idOwner.WalletBytes()) == 0 {
return errIncorrectOwner
}
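
Besides the mechanical object-to-objectSDK rename, this hunk replaces an inline errors.New("missing signature") with the package-level errMissingSignature sentinel declared near the top of the file. A minimal sketch of the pattern (the check helper is hypothetical):

```go
package object

import "errors"

// Declared once; every failure site returns the same value.
var errMissingSignature = errors.New("missing signature")

// check returns the sentinel instead of allocating a fresh error per call.
func check(sig any) error {
	if sig == nil {
		return errMissingSignature
	}
	return nil
}
```

With a sentinel, code in the same package (including tests) can branch on the failure class via errors.Is(err, errMissingSignature), which a throwaway errors.New at the call site cannot offer.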


@ -8,7 +8,7 @@ import (
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
@ -17,11 +17,11 @@ import (
"github.com/stretchr/testify/require"
)
func blankValidObject(key *ecdsa.PrivateKey) *object.Object {
func blankValidObject(key *ecdsa.PrivateKey) *objectSDK.Object {
var idOwner user.ID
user.IDFromKey(&idOwner, key.PublicKey)
obj := object.New()
obj := objectSDK.New()
obj.SetContainerID(cidtest.ID())
obj.SetOwnerID(&idOwner)
@ -66,20 +66,20 @@ func TestFormatValidator_Validate(t *testing.T) {
})
t.Run("nil identifier", func(t *testing.T) {
obj := object.New()
obj := objectSDK.New()
require.ErrorIs(t, v.Validate(context.Background(), obj, false), errNilID)
})
t.Run("nil container identifier", func(t *testing.T) {
obj := object.New()
obj := objectSDK.New()
obj.SetID(oidtest.ID())
require.ErrorIs(t, v.Validate(context.Background(), obj, true), errNilCID)
})
t.Run("unsigned object", func(t *testing.T) {
obj := object.New()
obj := objectSDK.New()
obj.SetContainerID(cidtest.ID())
obj.SetID(oidtest.ID())
@ -94,12 +94,12 @@ func TestFormatValidator_Validate(t *testing.T) {
err := tok.Sign(ownerKey.PrivateKey)
require.NoError(t, err)
obj := object.New()
obj := objectSDK.New()
obj.SetContainerID(cidtest.ID())
obj.SetSessionToken(tok)
obj.SetOwnerID(&idOwner)
require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
require.NoError(t, objectSDK.SetIDWithSignature(ownerKey.PrivateKey, obj))
require.NoError(t, v.Validate(context.Background(), obj, false))
})
@ -107,20 +107,20 @@ func TestFormatValidator_Validate(t *testing.T) {
t.Run("correct w/o session token", func(t *testing.T) {
obj := blankValidObject(&ownerKey.PrivateKey)
require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
require.NoError(t, objectSDK.SetIDWithSignature(ownerKey.PrivateKey, obj))
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("tombstone content", func(t *testing.T) {
obj := object.New()
obj.SetType(object.TypeTombstone)
obj := objectSDK.New()
obj.SetType(objectSDK.TypeTombstone)
obj.SetContainerID(cidtest.ID())
_, err := v.ValidateContent(obj)
require.Error(t, err) // no tombstone content
content := object.NewTombstone()
content := objectSDK.NewTombstone()
content.SetMembers([]oid.ID{oidtest.ID()})
data, err := content.Marshal()
@ -141,7 +141,7 @@ func TestFormatValidator_Validate(t *testing.T) {
_, err = v.ValidateContent(obj)
require.Error(t, err) // no expiration epoch in tombstone
var expirationAttribute object.Attribute
var expirationAttribute objectSDK.Attribute
expirationAttribute.SetKey(objectV2.SysAttributeExpEpoch)
expirationAttribute.SetValue(strconv.Itoa(10))
@ -163,20 +163,20 @@ func TestFormatValidator_Validate(t *testing.T) {
require.NoError(t, err) // all good
require.EqualValues(t, []oid.ID{id}, contentGot.Objects())
require.Equal(t, object.TypeTombstone, contentGot.Type())
require.Equal(t, objectSDK.TypeTombstone, contentGot.Type())
})
t.Run("expiration", func(t *testing.T) {
fn := func(val string) *object.Object {
fn := func(val string) *objectSDK.Object {
obj := blankValidObject(&ownerKey.PrivateKey)
var a object.Attribute
var a objectSDK.Attribute
a.SetKey(objectV2.SysAttributeExpEpoch)
a.SetValue(val)
obj.SetAttributes(a)
require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
require.NoError(t, objectSDK.SetIDWithSignature(ownerKey.PrivateKey, obj))
return obj
}
@ -221,11 +221,11 @@ func TestFormatValidator_Validate(t *testing.T) {
t.Run("duplication", func(t *testing.T) {
obj := blankValidObject(&ownerKey.PrivateKey)
var a1 object.Attribute
var a1 objectSDK.Attribute
a1.SetKey("key1")
a1.SetValue("val1")
var a2 object.Attribute
var a2 objectSDK.Attribute
a2.SetKey("key2")
a2.SetValue("val2")
@ -244,7 +244,7 @@ func TestFormatValidator_Validate(t *testing.T) {
t.Run("empty value", func(t *testing.T) {
obj := blankValidObject(&ownerKey.PrivateKey)
var a object.Attribute
var a objectSDK.Attribute
a.SetKey("key")
obj.SetAttributes(a)


@ -1,12 +1,12 @@
package object
import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
// AddressOf returns the address of the object.
func AddressOf(obj *object.Object) oid.Address {
func AddressOf(obj *objectSDK.Object) oid.Address {
var addr oid.Address
id, ok := obj.ID()


@ -16,7 +16,6 @@ func TestParseContractsSuccess(t *testing.T) {
contracts:
frostfs: ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62
processing: 597f5894867113a41e192801709c02497f611de8
audit: 219e37aed2180b87e7fe945dbf97d67125e8d73f
balance: d2aa48d14b17b11bc4c68205027884a96706dd16
container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6
frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a
@ -138,13 +137,12 @@ contracts:
func TestParseContractsInvalid(t *testing.T) {
t.Parallel()
t.Run("invalid audit contract", func(t *testing.T) {
t.Run("invalid frostfs contract", func(t *testing.T) {
t.Parallel()
file := strings.NewReader(`
contracts:
frostfs: invalid_data
processing: 597f5894867113a41e192801709c02497f611de8
audit: 219e37aed2180b87e7fe945dbf97d67125e8d73f
balance: d2aa48d14b17b11bc4c68205027884a96706dd16
container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6
frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a
@ -171,7 +169,6 @@ contracts:
contracts:
frostfs: ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62
processing: 597f5894867113a41e192801709c02497f611de8
audit: 219e37aed2180b87e7fe945dbf97d67125e8d73f
balance: d2aa48d14b17b11bc4c68205027884a96706dd16
container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6
frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a


@ -246,11 +246,7 @@ func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error {
}
err = bindMorphProcessor(s.alphabetProcessor, s)
if err != nil {
return err
}
return nil
return err
}
func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.Client,
@ -454,11 +450,7 @@ func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClien
}
err = s.initAlphabetProcessor(cfg)
if err != nil {
return err
}
return nil
return err
}
func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- error) (*chainParams, error) {


@ -282,7 +282,7 @@ func (c *testContainerClient) Get(cid []byte) (*containercore.Container, error)
if cont, found := c.get[key]; found {
return cont, nil
}
return nil, apistatus.ContainerNotFound{}
return nil, new(apistatus.ContainerNotFound)
}
type testIDClient struct {


@ -45,7 +45,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool {
val, ok := np.mintEmitCache.Get(receiver.String())
if ok && val+np.mintEmitThreshold >= curEpoch {
np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined,
zap.String("receiver", receiver.String()),
zap.Stringer("receiver", receiver),
zap.Uint64("last_emission", val),
zap.Uint64("current_epoch", curEpoch))


@ -60,8 +60,7 @@ func TestNewAlphabetList(t *testing.T) {
orig := keys.PublicKeys{k[1], k[2], k[3], k[4]}
main := keys.PublicKeys{k[1], k[2], k[5], k[4]}
exp := make(keys.PublicKeys, len(main))
copy(exp, main)
exp := main.Copy()
sort.Sort(exp)
got, err := newAlphabetList(orig, main)

Some files were not shown because too many files have changed in this diff.