From 9ee1bd36697276bf2c91aad7e6f5d55ca5cbe376 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Tue, 23 Jul 2024 10:31:37 +0300
Subject: [PATCH 001/705] [#1265] *: Run gofumpt
Signed-off-by: Aleksey Savchuk
---
pkg/innerring/processors/netmap/process_epoch.go | 1 -
pkg/innerring/processors/netmap/process_peers.go | 1 -
2 files changed, 2 deletions(-)
diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go
index f3cb9837f..4dfa3997b 100644
--- a/pkg/innerring/processors/netmap/process_epoch.go
+++ b/pkg/innerring/processors/netmap/process_epoch.go
@@ -51,7 +51,6 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool {
if epoch > 0 && np.alphabetState.IsAlphabet() { // estimates are invalid in genesis epoch
err = np.containerWrp.StartEstimation(prm)
-
if err != nil {
np.log.Warn(logs.NetmapCantStartContainerSizeEstimation,
zap.Uint64("epoch", epoch),
diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go
index 41e4bfb7e..9e6e8c283 100644
--- a/pkg/innerring/processors/netmap/process_peers.go
+++ b/pkg/innerring/processors/netmap/process_peers.go
@@ -83,7 +83,6 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
methodAddPeerNotary,
nodeInfoBinary,
)
-
if err != nil {
np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
return false
From 658e3cb92f2901c95a28e6eb742aecfb63835469 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 23 Jul 2024 09:35:41 +0300
Subject: [PATCH 002/705] [#1264] go.mod: Update bbolt to v1.3.10
Signed-off-by: Evgenii Stratonikov
---
go.mod | 4 ++--
go.sum | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/go.mod b/go.mod
index ee8b1bb16..a89520e34 100644
--- a/go.mod
+++ b/go.mod
@@ -34,13 +34,13 @@ require (
github.com/spf13/viper v1.18.2
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
- go.etcd.io/bbolt v1.3.9
+ go.etcd.io/bbolt v1.3.10
go.opentelemetry.io/otel v1.22.0
go.opentelemetry.io/otel/trace v1.22.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
golang.org/x/sync v0.6.0
- golang.org/x/sys v0.18.0
+ golang.org/x/sys v0.22.0
golang.org/x/term v0.18.0
google.golang.org/grpc v1.63.2
google.golang.org/protobuf v1.33.0
diff --git a/go.sum b/go.sum
index c7c3b87eb..163c53b27 100644
--- a/go.sum
+++ b/go.sum
@@ -285,8 +285,8 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
-go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
+go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
+go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
@@ -379,8 +379,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
From 7ddba70030118f681396f3eae37b6e3de3b06162 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 23 Jul 2024 09:39:14 +0300
Subject: [PATCH 003/705] [#1264] go.mod: Update dependencies
Signed-off-by: Evgenii Stratonikov
---
go.mod | 26 ++++++++++++------------
go.sum | 62 +++++++++++++++++++++++++++++-----------------------------
2 files changed, 44 insertions(+), 44 deletions(-)
diff --git a/go.mod b/go.mod
index a89520e34..0f608f74d 100644
--- a/go.mod
+++ b/go.mod
@@ -29,17 +29,17 @@ require (
github.com/paulmach/orb v0.11.0
github.com/prometheus/client_golang v1.19.0
github.com/spf13/cast v1.6.0
- github.com/spf13/cobra v1.8.0
+ github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
- github.com/spf13/viper v1.18.2
+ github.com/spf13/viper v1.19.0
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
go.etcd.io/bbolt v1.3.10
- go.opentelemetry.io/otel v1.22.0
- go.opentelemetry.io/otel/trace v1.22.0
+ go.opentelemetry.io/otel v1.24.0
+ go.opentelemetry.io/otel/trace v1.24.0
go.uber.org/zap v1.27.0
- golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
- golang.org/x/sync v0.6.0
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
+ golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
golang.org/x/term v0.18.0
google.golang.org/grpc v1.63.2
@@ -48,7 +48,7 @@ require (
)
require (
- github.com/sagikazarmark/locafero v0.4.0 // indirect
+ github.com/sagikazarmark/locafero v0.6.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
go.mongodb.org/mongo-driver v1.13.1 // indirect
@@ -63,7 +63,7 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/davidmz/go-pageant v1.0.2 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
@@ -98,7 +98,7 @@ require (
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
- github.com/pelletier/go-toml/v2 v2.1.1 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
@@ -114,15 +114,15 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 // indirect
- go.opentelemetry.io/otel/metric v1.22.0 // indirect
+ go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/sdk v1.22.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/net v0.23.0 // indirect
- golang.org/x/text v0.14.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
+ golang.org/x/text v0.16.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
diff --git a/go.sum b/go.sum
index 163c53b27..355e176f7 100644
--- a/go.sum
+++ b/go.sum
@@ -49,8 +49,8 @@ github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH
github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 h1:tYj5Ydh5D7Xg2R1tJnoG36Yta7NVB8C0vx36oPA3Bbw=
github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -213,8 +213,8 @@ github.com/panjf2000/ants/v2 v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnl
github.com/paulmach/orb v0.11.0 h1:JfVXJUBeH9ifc/OrhBY0lL16QsmPgpCHMlqSSYhcgAA=
github.com/paulmach/orb v0.11.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
-github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI=
-github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
@@ -234,8 +234,8 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
-github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
+github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
@@ -246,17 +246,18 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
-github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU=
github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -290,20 +291,20 @@ go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
-go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
-go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 h1:zr8ymM5OWWjjiWRzwTfZ67c905+2TMHYp2lMJ52QTyM=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0/go.mod h1:sQs7FT2iLVJ+67vYngGJkPe1qr39IzaBzaj9IDNNY8k=
-go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
-go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
-go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
-go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -321,14 +322,14 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
+golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -353,8 +354,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -395,8 +396,9 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -404,19 +406,17 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
-golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
+golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
+golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
-google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
-google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
-google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
From 94f07b4778693842cbc7ac8e6b9228a688e71da1 Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Tue, 23 Jul 2024 12:15:20 +0300
Subject: [PATCH 004/705] [#1245] docker: Fix warnings
Signed-off-by: Ekaterina Lebedeva
---
.docker/Dockerfile.adm | 2 +-
.docker/Dockerfile.cli | 2 +-
.docker/Dockerfile.ir | 2 +-
.docker/Dockerfile.storage | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.docker/Dockerfile.adm b/.docker/Dockerfile.adm
index b3dad06d3..5d67a1d04 100644
--- a/.docker/Dockerfile.adm
+++ b/.docker/Dockerfile.adm
@@ -1,4 +1,4 @@
-FROM golang:1.22 as builder
+FROM golang:1.22 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.cli b/.docker/Dockerfile.cli
index 5adedc140..16f643b61 100644
--- a/.docker/Dockerfile.cli
+++ b/.docker/Dockerfile.cli
@@ -1,4 +1,4 @@
-FROM golang:1.22 as builder
+FROM golang:1.22 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.ir b/.docker/Dockerfile.ir
index 25025bb2f..f2cb764e5 100644
--- a/.docker/Dockerfile.ir
+++ b/.docker/Dockerfile.ir
@@ -1,4 +1,4 @@
-FROM golang:1.22 as builder
+FROM golang:1.22 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
diff --git a/.docker/Dockerfile.storage b/.docker/Dockerfile.storage
index a16005516..cf7f97748 100644
--- a/.docker/Dockerfile.storage
+++ b/.docker/Dockerfile.storage
@@ -1,4 +1,4 @@
-FROM golang:1.22 as builder
+FROM golang:1.22 AS builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
From 1032075a2195c07a46d303b254c6f08cb032f93c Mon Sep 17 00:00:00 2001
From: George Bartolomey
Date: Mon, 22 Jul 2024 11:08:09 +0300
Subject: [PATCH 005/705] [#1259] cli: Remove locode subcommand
Removed `frostfs-cli util locode` subcommand.
Alternative command could be found in
`git.frostfs.info/TrueCloudLab/frostfs-locode-db`.
Signed-off-by: George Bartolomey
---
cmd/frostfs-cli/modules/util/locode.go | 18 ----
.../modules/util/locode_generate.go | 96 -------------------
cmd/frostfs-cli/modules/util/locode_info.go | 56 -----------
cmd/frostfs-cli/modules/util/root.go | 2 -
4 files changed, 172 deletions(-)
delete mode 100644 cmd/frostfs-cli/modules/util/locode.go
delete mode 100644 cmd/frostfs-cli/modules/util/locode_generate.go
delete mode 100644 cmd/frostfs-cli/modules/util/locode_info.go
diff --git a/cmd/frostfs-cli/modules/util/locode.go b/cmd/frostfs-cli/modules/util/locode.go
deleted file mode 100644
index a1f0f4d3f..000000000
--- a/cmd/frostfs-cli/modules/util/locode.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package util
-
-import (
- "github.com/spf13/cobra"
-)
-
-// locode section.
-var locodeCmd = &cobra.Command{
- Use: "locode",
- Short: "Working with FrostFS UN/LOCODE database",
-}
-
-func initLocodeCmd() {
- locodeCmd.AddCommand(locodeGenerateCmd, locodeInfoCmd)
-
- initUtilLocodeInfoCmd()
- initUtilLocodeGenerateCmd()
-}
diff --git a/cmd/frostfs-cli/modules/util/locode_generate.go b/cmd/frostfs-cli/modules/util/locode_generate.go
deleted file mode 100644
index 319dee1c6..000000000
--- a/cmd/frostfs-cli/modules/util/locode_generate.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package util
-
-import (
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
- airportsdb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/airports"
- locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb"
- continentsdb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/continents/geojson"
- csvlocode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/table/csv"
- "github.com/spf13/cobra"
-)
-
-type namesDB struct {
- *airportsdb.DB
- *csvlocode.Table
-}
-
-const (
- locodeGenerateInputFlag = "in"
- locodeGenerateSubDivFlag = "subdiv"
- locodeGenerateAirportsFlag = "airports"
- locodeGenerateCountriesFlag = "countries"
- locodeGenerateContinentsFlag = "continents"
- locodeGenerateOutputFlag = "out"
-)
-
-var (
- locodeGenerateInPaths []string
- locodeGenerateSubDivPath string
- locodeGenerateAirportsPath string
- locodeGenerateCountriesPath string
- locodeGenerateContinentsPath string
- locodeGenerateOutPath string
-
- locodeGenerateCmd = &cobra.Command{
- Use: "generate",
- Short: "Generate UN/LOCODE database for FrostFS",
- Run: func(cmd *cobra.Command, _ []string) {
- locodeDB := csvlocode.New(
- csvlocode.Prm{
- Path: locodeGenerateInPaths[0],
- SubDivPath: locodeGenerateSubDivPath,
- },
- csvlocode.WithExtraPaths(locodeGenerateInPaths[1:]...),
- )
-
- airportDB := airportsdb.New(airportsdb.Prm{
- AirportsPath: locodeGenerateAirportsPath,
- CountriesPath: locodeGenerateCountriesPath,
- })
-
- continentsDB := continentsdb.New(continentsdb.Prm{
- Path: locodeGenerateContinentsPath,
- })
-
- targetDB := locodebolt.New(locodebolt.Prm{
- Path: locodeGenerateOutPath,
- })
-
- err := targetDB.Open()
- commonCmd.ExitOnErr(cmd, "", err)
-
- defer targetDB.Close()
-
- names := &namesDB{
- DB: airportDB,
- Table: locodeDB,
- }
-
- err = locodedb.FillDatabase(locodeDB, airportDB, continentsDB, names, targetDB)
- commonCmd.ExitOnErr(cmd, "", err)
- },
- }
-)
-
-func initUtilLocodeGenerateCmd() {
- flags := locodeGenerateCmd.Flags()
-
- flags.StringSliceVar(&locodeGenerateInPaths, locodeGenerateInputFlag, nil, "List of paths to UN/LOCODE tables (csv)")
- _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateInputFlag)
-
- flags.StringVar(&locodeGenerateSubDivPath, locodeGenerateSubDivFlag, "", "Path to UN/LOCODE subdivision database (csv)")
- _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateSubDivFlag)
-
- flags.StringVar(&locodeGenerateAirportsPath, locodeGenerateAirportsFlag, "", "Path to OpenFlights airport database (csv)")
- _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateAirportsFlag)
-
- flags.StringVar(&locodeGenerateCountriesPath, locodeGenerateCountriesFlag, "", "Path to OpenFlights country database (csv)")
- _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateCountriesFlag)
-
- flags.StringVar(&locodeGenerateContinentsPath, locodeGenerateContinentsFlag, "", "Path to continent polygons (GeoJSON)")
- _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateContinentsFlag)
-
- flags.StringVar(&locodeGenerateOutPath, locodeGenerateOutputFlag, "", "Target path for generated database")
- _ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateOutputFlag)
-}
diff --git a/cmd/frostfs-cli/modules/util/locode_info.go b/cmd/frostfs-cli/modules/util/locode_info.go
deleted file mode 100644
index e89252dea..000000000
--- a/cmd/frostfs-cli/modules/util/locode_info.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package util
-
-import (
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
- locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb"
- "github.com/spf13/cobra"
-)
-
-const (
- locodeInfoDBFlag = "db"
- locodeInfoCodeFlag = "locode"
-)
-
-var (
- locodeInfoDBPath string
- locodeInfoCode string
-
- locodeInfoCmd = &cobra.Command{
- Use: "info",
- Short: "Print information about UN/LOCODE from FrostFS database",
- Run: func(cmd *cobra.Command, _ []string) {
- targetDB := locodebolt.New(locodebolt.Prm{
- Path: locodeInfoDBPath,
- }, locodebolt.ReadOnly())
-
- err := targetDB.Open()
- commonCmd.ExitOnErr(cmd, "", err)
-
- defer targetDB.Close()
-
- record, err := locodedb.LocodeRecord(targetDB, locodeInfoCode)
- commonCmd.ExitOnErr(cmd, "", err)
-
- cmd.Printf("Country: %s\n", record.CountryName())
- cmd.Printf("Location: %s\n", record.LocationName())
- cmd.Printf("Continent: %s\n", record.Continent())
- if subDivCode := record.SubDivCode(); subDivCode != "" {
- cmd.Printf("Subdivision: [%s] %s\n", subDivCode, record.SubDivName())
- }
-
- geoPoint := record.GeoPoint()
- cmd.Printf("Coordinates: %0.2f, %0.2f\n", geoPoint.Latitude(), geoPoint.Longitude())
- },
- }
-)
-
-func initUtilLocodeInfoCmd() {
- flags := locodeInfoCmd.Flags()
-
- flags.StringVar(&locodeInfoDBPath, locodeInfoDBFlag, "", "Path to FrostFS UN/LOCODE database")
- _ = locodeInfoCmd.MarkFlagRequired(locodeInfoDBFlag)
-
- flags.StringVar(&locodeInfoCode, locodeInfoCodeFlag, "", "UN/LOCODE")
- _ = locodeInfoCmd.MarkFlagRequired(locodeInfoCodeFlag)
-}
diff --git a/cmd/frostfs-cli/modules/util/root.go b/cmd/frostfs-cli/modules/util/root.go
index 4a6b4403b..a909e6899 100644
--- a/cmd/frostfs-cli/modules/util/root.go
+++ b/cmd/frostfs-cli/modules/util/root.go
@@ -23,11 +23,9 @@ func init() {
signCmd,
convertCmd,
keyerCmd,
- locodeCmd,
)
initSignCmd()
initConvertCmd()
initKeyerCmd()
- initLocodeCmd()
}
From 9c2c76ca32be3aef1169480fdce203e3947805d3 Mon Sep 17 00:00:00 2001
From: George Bartolomey
Date: Mon, 22 Jul 2024 11:12:41 +0300
Subject: [PATCH 006/705] [#1259] Move pkg/util/locode to frostfs-locode-db
Removed pkg/util/locode package, added
git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode dependency.
Signed-off-by: George Bartolomey
---
go.mod | 3 +-
go.sum | 39 +---
pkg/innerring/locode.go | 6 +-
.../netmap/nodevalidation/locode/calls.go | 2 +-
.../nodevalidation/locode/calls_test.go | 4 +-
.../netmap/nodevalidation/locode/deps.go | 4 +-
pkg/util/locode/column/coordinates.go | 193 -----------------
pkg/util/locode/column/country.go | 38 ----
pkg/util/locode/column/location.go | 38 ----
pkg/util/locode/column/util.go | 9 -
pkg/util/locode/db/airports/calls.go | 194 ------------------
pkg/util/locode/db/airports/db.go | 83 --------
pkg/util/locode/db/airports/opts.go | 19 --
pkg/util/locode/db/boltdb/calls.go | 166 ---------------
pkg/util/locode/db/boltdb/db.go | 73 -------
pkg/util/locode/db/boltdb/opts.go | 37 ----
pkg/util/locode/db/continent.go | 81 --------
.../locode/db/continents/geojson/calls.go | 98 ---------
pkg/util/locode/db/continents/geojson/db.go | 63 ------
pkg/util/locode/db/continents/geojson/opts.go | 10 -
pkg/util/locode/db/country.go | 32 ---
pkg/util/locode/db/db.go | 183 -----------------
pkg/util/locode/db/location.go | 32 ---
pkg/util/locode/db/point.go | 93 ---------
pkg/util/locode/db/point_test.go | 51 -----
pkg/util/locode/db/record.go | 140 -------------
pkg/util/locode/record.go | 83 --------
pkg/util/locode/table/csv/calls.go | 156 --------------
pkg/util/locode/table/csv/opts.go | 28 ---
pkg/util/locode/table/csv/table.go | 75 -------
30 files changed, 11 insertions(+), 2022 deletions(-)
delete mode 100644 pkg/util/locode/column/coordinates.go
delete mode 100644 pkg/util/locode/column/country.go
delete mode 100644 pkg/util/locode/column/location.go
delete mode 100644 pkg/util/locode/column/util.go
delete mode 100644 pkg/util/locode/db/airports/calls.go
delete mode 100644 pkg/util/locode/db/airports/db.go
delete mode 100644 pkg/util/locode/db/airports/opts.go
delete mode 100644 pkg/util/locode/db/boltdb/calls.go
delete mode 100644 pkg/util/locode/db/boltdb/db.go
delete mode 100644 pkg/util/locode/db/boltdb/opts.go
delete mode 100644 pkg/util/locode/db/continent.go
delete mode 100644 pkg/util/locode/db/continents/geojson/calls.go
delete mode 100644 pkg/util/locode/db/continents/geojson/db.go
delete mode 100644 pkg/util/locode/db/continents/geojson/opts.go
delete mode 100644 pkg/util/locode/db/country.go
delete mode 100644 pkg/util/locode/db/db.go
delete mode 100644 pkg/util/locode/db/location.go
delete mode 100644 pkg/util/locode/db/point.go
delete mode 100644 pkg/util/locode/db/point_test.go
delete mode 100644 pkg/util/locode/db/record.go
delete mode 100644 pkg/util/locode/record.go
delete mode 100644 pkg/util/locode/table/csv/calls.go
delete mode 100644 pkg/util/locode/table/csv/opts.go
delete mode 100644 pkg/util/locode/table/csv/table.go
diff --git a/go.mod b/go.mod
index 0f608f74d..6a97f7850 100644
--- a/go.mod
+++ b/go.mod
@@ -7,6 +7,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
+ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240617140730-1a5886e776de
git.frostfs.info/TrueCloudLab/hrw v1.2.1
@@ -26,7 +27,6 @@ require (
github.com/nspcc-dev/neo-go v0.106.0
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0
- github.com/paulmach/orb v0.11.0
github.com/prometheus/client_golang v1.19.0
github.com/spf13/cast v1.6.0
github.com/spf13/cobra v1.8.1
@@ -51,7 +51,6 @@ require (
github.com/sagikazarmark/locafero v0.6.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
- go.mongodb.org/mongo-driver v1.13.1 // indirect
)
require (
diff --git a/go.sum b/go.sum
index 355e176f7..d4cf863fa 100644
--- a/go.sum
+++ b/go.sum
@@ -6,6 +6,8 @@ git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
+git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
+git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240617140730-1a5886e776de h1:OjsWY0jpGJV1t87XgwL/3PsDx7fJ6lfNMXtY8UhoUbM=
@@ -81,7 +83,6 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-pkgz/expirable-cache/v3 v3.0.0 h1:u3/gcu3sabLYiTCevoRKv+WzjIn5oo7P8XtiXBeRDLw=
github.com/go-pkgz/expirable-cache/v3 v3.0.0/go.mod h1:2OQiDyEGQalYecLWmXprm3maPXeVb5/6/X7yRPYTzec=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -91,13 +92,11 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -129,20 +128,14 @@ github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q=
github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
@@ -171,7 +164,6 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
@@ -210,12 +202,8 @@ github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/panjf2000/ants/v2 v2.9.0 h1:SztCLkVxBRigbg+vt0S5QvF5vxAbxbKt09/YfAJ0tEo=
github.com/panjf2000/ants/v2 v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
-github.com/paulmach/orb v0.11.0 h1:JfVXJUBeH9ifc/OrhBY0lL16QsmPgpCHMlqSSYhcgAA=
-github.com/paulmach/orb v0.11.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
-github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -259,7 +247,6 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
@@ -272,25 +259,14 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk=
github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA=
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
-github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
-github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
-github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
-github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
-go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
-go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
-go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
@@ -318,13 +294,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
@@ -333,12 +307,10 @@ golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
@@ -348,9 +320,7 @@ golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
@@ -393,7 +363,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
@@ -401,9 +370,7 @@ golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
@@ -427,11 +394,9 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
diff --git a/pkg/innerring/locode.go b/pkg/innerring/locode.go
index a9a9498b6..a0c3ea751 100644
--- a/pkg/innerring/locode.go
+++ b/pkg/innerring/locode.go
@@ -1,11 +1,11 @@
package innerring
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
+ locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
irlocode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/locode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
- locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb"
"github.com/spf13/viper"
)
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
index d071a7792..5e0558344 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go
@@ -4,7 +4,7 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
+ "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
index 6697391e8..8ab174dfd 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go
@@ -5,9 +5,9 @@ import (
"fmt"
"testing"
+ locodestd "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/locode"
- locodestd "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/stretchr/testify/require"
)
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
index e6332261e..8f6667933 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
@@ -1,8 +1,8 @@
package locode
import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
+ "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode"
+ locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db"
)
// Record is an interface of read-only
diff --git a/pkg/util/locode/column/coordinates.go b/pkg/util/locode/column/coordinates.go
deleted file mode 100644
index 5e32c016e..000000000
--- a/pkg/util/locode/column/coordinates.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package locodecolumn
-
-import (
- "fmt"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
-)
-
-const (
- minutesDigits = 2
- hemisphereSymbols = 1
-)
-
-const (
- latDegDigits = 2
- lngDegDigits = 3
-)
-
-type coordinateCode struct {
- degDigits int
- value []uint8
-}
-
-// LongitudeCode represents the value of the longitude
-// of the location conforming to UN/LOCODE specification.
-type LongitudeCode coordinateCode
-
-// LongitudeHemisphere represents the hemisphere of the earth
-// // along the Greenwich meridian.
-type LongitudeHemisphere [hemisphereSymbols]uint8
-
-// LatitudeCode represents the value of the latitude
-// of the location conforming to UN/LOCODE specification.
-type LatitudeCode coordinateCode
-
-// LatitudeHemisphere represents the hemisphere of the earth
-// along the equator.
-type LatitudeHemisphere [hemisphereSymbols]uint8
-
-func coordinateFromString(s string, degDigits int, hemisphereAlphabet []uint8) (*coordinateCode, error) {
- if len(s) != degDigits+minutesDigits+hemisphereSymbols {
- return nil, locode.ErrInvalidString
- }
-
- for i := range s[:degDigits+minutesDigits] {
- if !isDigit(s[i]) {
- return nil, locode.ErrInvalidString
- }
- }
-
-loop:
- for _, sym := range s[degDigits+minutesDigits:] {
- for j := range hemisphereAlphabet {
- if hemisphereAlphabet[j] == uint8(sym) {
- continue loop
- }
- }
-
- return nil, locode.ErrInvalidString
- }
-
- return &coordinateCode{
- degDigits: degDigits,
- value: []uint8(s),
- }, nil
-}
-
-// LongitudeFromString parses a string and returns the location's longitude.
-func LongitudeFromString(s string) (*LongitudeCode, error) {
- cc, err := coordinateFromString(s, lngDegDigits, []uint8{'W', 'E'})
- if err != nil {
- return nil, err
- }
-
- return (*LongitudeCode)(cc), nil
-}
-
-// LatitudeFromString parses a string and returns the location's latitude.
-func LatitudeFromString(s string) (*LatitudeCode, error) {
- cc, err := coordinateFromString(s, latDegDigits, []uint8{'N', 'S'})
- if err != nil {
- return nil, err
- }
-
- return (*LatitudeCode)(cc), nil
-}
-
-func (cc *coordinateCode) degrees() []uint8 {
- return cc.value[:cc.degDigits]
-}
-
-// Degrees returns the longitude's degrees.
-func (lc *LongitudeCode) Degrees() (l [lngDegDigits]uint8) {
- copy(l[:], (*coordinateCode)(lc).degrees())
- return
-}
-
-// Degrees returns the latitude's degrees.
-func (lc *LatitudeCode) Degrees() (l [latDegDigits]uint8) {
- copy(l[:], (*coordinateCode)(lc).degrees())
- return
-}
-
-func (cc *coordinateCode) minutes() (mnt [minutesDigits]uint8) {
- for i := 0; i < minutesDigits; i++ {
- mnt[i] = cc.value[cc.degDigits+i]
- }
-
- return
-}
-
-// Minutes returns the longitude's minutes.
-func (lc *LongitudeCode) Minutes() [minutesDigits]uint8 {
- return (*coordinateCode)(lc).minutes()
-}
-
-// Minutes returns the latitude's minutes.
-func (lc *LatitudeCode) Minutes() [minutesDigits]uint8 {
- return (*coordinateCode)(lc).minutes()
-}
-
-// Hemisphere returns the longitude's hemisphere code.
-func (lc *LongitudeCode) Hemisphere() LongitudeHemisphere {
- return (*coordinateCode)(lc).hemisphere()
-}
-
-// Hemisphere returns the latitude's hemisphere code.
-func (lc *LatitudeCode) Hemisphere() LatitudeHemisphere {
- return (*coordinateCode)(lc).hemisphere()
-}
-
-func (cc *coordinateCode) hemisphere() (h [hemisphereSymbols]uint8) {
- for i := 0; i < hemisphereSymbols; i++ {
- h[i] = cc.value[cc.degDigits+minutesDigits+i]
- }
-
- return h
-}
-
-// North returns true for the northern hemisphere.
-func (h LatitudeHemisphere) North() bool {
- return h[0] == 'N'
-}
-
-// East returns true for the eastern hemisphere.
-func (h LongitudeHemisphere) East() bool {
- return h[0] == 'E'
-}
-
-// Coordinates represents the coordinates of the location from UN/LOCODE table.
-type Coordinates struct {
- lat *LatitudeCode
-
- lng *LongitudeCode
-}
-
-// Latitude returns the location's latitude.
-func (c *Coordinates) Latitude() *LatitudeCode {
- return c.lat
-}
-
-// Longitude returns the location's longitude.
-func (c *Coordinates) Longitude() *LongitudeCode {
- return c.lng
-}
-
-// CoordinatesFromString parses a string and returns the location's coordinates.
-func CoordinatesFromString(s string) (*Coordinates, error) {
- if len(s) == 0 {
- return nil, nil
- }
-
- strs := strings.Split(s, " ")
- if len(strs) != 2 {
- return nil, locode.ErrInvalidString
- }
-
- lat, err := LatitudeFromString(strs[0])
- if err != nil {
- return nil, fmt.Errorf("could not parse latitude: %w", err)
- }
-
- lng, err := LongitudeFromString(strs[1])
- if err != nil {
- return nil, fmt.Errorf("could not parse longitude: %w", err)
- }
-
- return &Coordinates{
- lat: lat,
- lng: lng,
- }, nil
-}
diff --git a/pkg/util/locode/column/country.go b/pkg/util/locode/column/country.go
deleted file mode 100644
index 7b29a97c5..000000000
--- a/pkg/util/locode/column/country.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package locodecolumn
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
-)
-
-const countryCodeLen = 2
-
-// CountryCode represents ISO 3166 alpha-2 Country Code.
-type CountryCode [countryCodeLen]uint8
-
-// Symbols returns digits of the country code.
-func (cc *CountryCode) Symbols() [countryCodeLen]uint8 {
- return *cc
-}
-
-// CountryCodeFromString parses a string and returns the country code.
-func CountryCodeFromString(s string) (*CountryCode, error) {
- if l := len(s); l != countryCodeLen {
- return nil, fmt.Errorf("incorrect country code length: expect: %d, got: %d",
- countryCodeLen,
- l,
- )
- }
-
- for i := range s {
- if !isUpperAlpha(s[i]) {
- return nil, locode.ErrInvalidString
- }
- }
-
- cc := CountryCode{}
- copy(cc[:], s)
-
- return &cc, nil
-}
diff --git a/pkg/util/locode/column/location.go b/pkg/util/locode/column/location.go
deleted file mode 100644
index 4303228fb..000000000
--- a/pkg/util/locode/column/location.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package locodecolumn
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
-)
-
-const locationCodeLen = 3
-
-// LocationCode represents 3-character code for the location.
-type LocationCode [locationCodeLen]uint8
-
-// Symbols returns characters of the location code.
-func (lc *LocationCode) Symbols() [locationCodeLen]uint8 {
- return *lc
-}
-
-// LocationCodeFromString parses a string and returns the location code.
-func LocationCodeFromString(s string) (*LocationCode, error) {
- if l := len(s); l != locationCodeLen {
- return nil, fmt.Errorf("incorrect location code length: expect: %d, got: %d",
- locationCodeLen,
- l,
- )
- }
-
- for i := range s {
- if !isUpperAlpha(s[i]) && !isDigit(s[i]) {
- return nil, locode.ErrInvalidString
- }
- }
-
- lc := LocationCode{}
- copy(lc[:], s)
-
- return &lc, nil
-}
diff --git a/pkg/util/locode/column/util.go b/pkg/util/locode/column/util.go
deleted file mode 100644
index 8da1f9a25..000000000
--- a/pkg/util/locode/column/util.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package locodecolumn
-
-func isDigit(sym uint8) bool {
- return sym >= '0' && sym <= '9'
-}
-
-func isUpperAlpha(sym uint8) bool {
- return sym >= 'A' && sym <= 'Z'
-}
diff --git a/pkg/util/locode/db/airports/calls.go b/pkg/util/locode/db/airports/calls.go
deleted file mode 100644
index dac8cce8b..000000000
--- a/pkg/util/locode/db/airports/calls.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package airportsdb
-
-import (
- "encoding/csv"
- "errors"
- "fmt"
- "io"
- "os"
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
-)
-
-const (
- _ = iota - 1
-
- _ // Airport ID
- _ // Name
- airportCity
- airportCountry
- airportIATA
- _ // ICAO
- airportLatitude
- airportLongitude
- _ // Altitude
- _ // Timezone
- _ // DST
- _ // Tz database time zone
- _ // Type
- _ // Source
-
- airportFldNum
-)
-
-type record struct {
- city,
- country,
- iata,
- lat,
- lng string
-}
-
-// Get scans the records of the OpenFlights Airport to an in-memory table (once),
-// and returns an entry that matches the passed UN/LOCODE record.
-//
-// Records are matched if they have the same country code and either
-// same IATA code or same city name (location name in UN/LOCODE).
-//
-// Returns locodedb.ErrAirportNotFound if no entry matches.
-func (db *DB) Get(locodeRecord locode.Record) (*locodedb.AirportRecord, error) {
- if err := db.initAirports(); err != nil {
- return nil, err
- }
-
- records := db.mAirports[locodeRecord.LOCODE.CountryCode()]
-
- for i := range records {
- if locodeRecord.LOCODE.LocationCode() != records[i].iata &&
- locodeRecord.NameWoDiacritics != records[i].city {
- continue
- }
-
- lat, err := strconv.ParseFloat(records[i].lat, 64)
- if err != nil {
- return nil, err
- }
-
- lng, err := strconv.ParseFloat(records[i].lng, 64)
- if err != nil {
- return nil, err
- }
-
- return &locodedb.AirportRecord{
- CountryName: records[i].country,
- Point: locodedb.NewPoint(lat, lng),
- }, nil
- }
-
- return nil, locodedb.ErrAirportNotFound
-}
-
-const (
- _ = iota - 1
-
- countryName
- countryISOCode
- _ // dafif_code
-
- countryFldNum
-)
-
-// CountryName scans the records of the OpenFlights Country table to an in-memory table (once),
-// and returns the name of the country by code.
-//
-// Returns locodedb.ErrCountryNotFound if no entry matches.
-func (db *DB) CountryName(code *locodedb.CountryCode) (name string, err error) {
- if err = db.initCountries(); err != nil {
- return
- }
-
- argCode := code.String()
-
- for cName, cCode := range db.mCountries {
- if cCode == argCode {
- name = cName
- break
- }
- }
-
- if name == "" {
- err = locodedb.ErrCountryNotFound
- }
-
- return
-}
-
-func (db *DB) initAirports() (err error) {
- db.airportsOnce.Do(func() {
- db.mAirports = make(map[string][]record)
-
- if err = db.initCountries(); err != nil {
- return
- }
-
- err = db.scanWords(db.airports, airportFldNum, func(words []string) error {
- countryCode := db.mCountries[words[airportCountry]]
- if countryCode != "" {
- db.mAirports[countryCode] = append(db.mAirports[countryCode], record{
- city: words[airportCity],
- country: words[airportCountry],
- iata: words[airportIATA],
- lat: words[airportLatitude],
- lng: words[airportLongitude],
- })
- }
-
- return nil
- })
- })
-
- return
-}
-
-func (db *DB) initCountries() (err error) {
- db.countriesOnce.Do(func() {
- db.mCountries = make(map[string]string)
-
- err = db.scanWords(db.countries, countryFldNum, func(words []string) error {
- db.mCountries[words[countryName]] = words[countryISOCode]
-
- return nil
- })
- })
-
- return
-}
-
-var errScanInt = errors.New("interrupt scan")
-
-func (db *DB) scanWords(pm pathMode, num int, wordsHandler func([]string) error) error {
- tableFile, err := os.OpenFile(pm.path, os.O_RDONLY, pm.mode)
- if err != nil {
- return err
- }
-
- defer tableFile.Close()
-
- r := csv.NewReader(tableFile)
- r.ReuseRecord = true
-
- for {
- words, err := r.Read()
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
-
- return err
- } else if ln := len(words); ln != num {
- return fmt.Errorf("unexpected number of words %d", ln)
- }
-
- if err := wordsHandler(words); err != nil {
- if errors.Is(err, errScanInt) {
- break
- }
-
- return err
- }
- }
-
- return nil
-}
diff --git a/pkg/util/locode/db/airports/db.go b/pkg/util/locode/db/airports/db.go
deleted file mode 100644
index acfa3fd60..000000000
--- a/pkg/util/locode/db/airports/db.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package airportsdb
-
-import (
- "fmt"
- "io/fs"
- "sync"
-)
-
-// Prm groups the required parameters of the DB's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Path to OpenFlights Airport csv table.
- //
- // Must not be empty.
- AirportsPath string
-
- // Path to OpenFlights Countries csv table.
- //
- // Must not be empty.
- CountriesPath string
-}
-
-// DB is a descriptor of the OpenFlights database in csv format.
-//
-// For correct operation, DB must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// The DB is immediately ready to work through API.
-type DB struct {
- airports, countries pathMode
-
- airportsOnce, countriesOnce sync.Once
-
- mCountries map[string]string
-
- mAirports map[string][]record
-}
-
-type pathMode struct {
- path string
- mode fs.FileMode
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the DB.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created DB does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *DB {
- switch {
- case prm.AirportsPath == "":
- panicOnPrmValue("AirportsPath", prm.AirportsPath)
- case prm.CountriesPath == "":
- panicOnPrmValue("CountriesPath", prm.CountriesPath)
- }
-
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &DB{
- airports: pathMode{
- path: prm.AirportsPath,
- mode: o.airportMode,
- },
- countries: pathMode{
- path: prm.CountriesPath,
- mode: o.countryMode,
- },
- }
-}
diff --git a/pkg/util/locode/db/airports/opts.go b/pkg/util/locode/db/airports/opts.go
deleted file mode 100644
index 3799d9e27..000000000
--- a/pkg/util/locode/db/airports/opts.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package airportsdb
-
-import (
- "io/fs"
-)
-
-// Option sets an optional parameter of DB.
-type Option func(*options)
-
-type options struct {
- airportMode, countryMode fs.FileMode
-}
-
-func defaultOpts() *options {
- return &options{
- airportMode: fs.ModePerm, // 0777
- countryMode: fs.ModePerm, // 0777
- }
-}
diff --git a/pkg/util/locode/db/boltdb/calls.go b/pkg/util/locode/db/boltdb/calls.go
deleted file mode 100644
index 6a80def3a..000000000
--- a/pkg/util/locode/db/boltdb/calls.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package locodebolt
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "path/filepath"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
- "go.etcd.io/bbolt"
-)
-
-// Open opens an underlying BoltDB instance.
-//
-// Timeout of BoltDB opening is 3s (only for Linux or Darwin).
-//
-// Opens BoltDB in read-only mode if DB is read-only.
-func (db *DB) Open() error {
- // copy-paste from metabase:
- // consider universal Open/Close for BoltDB wrappers
-
- err := util.MkdirAllX(filepath.Dir(db.path), db.mode)
- if err != nil {
- return fmt.Errorf("could not create dir for BoltDB: %w", err)
- }
-
- db.bolt, err = bbolt.Open(db.path, db.mode, db.boltOpts)
- if err != nil {
- return fmt.Errorf("could not open BoltDB: %w", err)
- }
-
- return nil
-}
-
-// Close closes an underlying BoltDB instance.
-//
-// Must not be called before successful Open call.
-func (db *DB) Close() error {
- return db.bolt.Close()
-}
-
-func countryBucketKey(cc *locodedb.CountryCode) ([]byte, error) {
- return []byte(cc.String()), nil
-}
-
-func locationBucketKey(lc *locodedb.LocationCode) ([]byte, error) {
- return []byte(lc.String()), nil
-}
-
-type recordJSON struct {
- CountryName string
- LocationName string
- SubDivName string
- SubDivCode string
- Latitude float64
- Longitude float64
- Continent string
-}
-
-func recordValue(r locodedb.Record) ([]byte, error) {
- p := r.GeoPoint()
-
- rj := &recordJSON{
- CountryName: r.CountryName(),
- LocationName: r.LocationName(),
- SubDivName: r.SubDivName(),
- SubDivCode: r.SubDivCode(),
- Latitude: p.Latitude(),
- Longitude: p.Longitude(),
- Continent: r.Continent().String(),
- }
-
- return json.Marshal(rj)
-}
-
-func recordFromValue(data []byte) (*locodedb.Record, error) {
- rj := new(recordJSON)
-
- if err := json.Unmarshal(data, rj); err != nil {
- return nil, err
- }
-
- r := new(locodedb.Record)
- r.SetCountryName(rj.CountryName)
- r.SetLocationName(rj.LocationName)
- r.SetSubDivName(rj.SubDivName)
- r.SetSubDivCode(rj.SubDivCode)
- r.SetGeoPoint(locodedb.NewPoint(rj.Latitude, rj.Longitude))
-
- cont := locodedb.ContinentFromString(rj.Continent)
- r.SetContinent(&cont)
-
- return r, nil
-}
-
-// Put saves the record by key in an underlying BoltDB instance.
-//
-// Country code from the key is used for allocating the 1st level buckets.
-// Records are stored in country buckets by the location code from the key.
-// The records are stored in internal binary JSON format.
-//
-// Must not be called before successful Open call.
-// Must not be called in read-only mode: behavior is undefined.
-func (db *DB) Put(key locodedb.Key, rec locodedb.Record) error {
- return db.bolt.Batch(func(tx *bbolt.Tx) error {
- countryKey, err := countryBucketKey(key.CountryCode())
- if err != nil {
- return err
- }
-
- bktCountry, err := tx.CreateBucketIfNotExists(countryKey)
- if err != nil {
- return fmt.Errorf("could not create country bucket: %w", err)
- }
-
- locationKey, err := locationBucketKey(key.LocationCode())
- if err != nil {
- return err
- }
-
- cont, err := recordValue(rec)
- if err != nil {
- return err
- }
-
- return bktCountry.Put(locationKey, cont)
- })
-}
-
-var errRecordNotFound = errors.New("record not found")
-
-// Get reads the record by key from underlying BoltDB instance.
-//
-// Returns an error if no record is presented by key in DB.
-//
-// Must not be called before successful Open call.
-func (db *DB) Get(key locodedb.Key) (rec *locodedb.Record, err error) {
- err = db.bolt.View(func(tx *bbolt.Tx) error {
- countryKey, err := countryBucketKey(key.CountryCode())
- if err != nil {
- return err
- }
-
- bktCountry := tx.Bucket(countryKey)
- if bktCountry == nil {
- return errRecordNotFound
- }
-
- locationKey, err := locationBucketKey(key.LocationCode())
- if err != nil {
- return err
- }
-
- data := bktCountry.Get(locationKey)
- if data == nil {
- return errRecordNotFound
- }
-
- rec, err = recordFromValue(data)
-
- return err
- })
-
- return
-}
diff --git a/pkg/util/locode/db/boltdb/db.go b/pkg/util/locode/db/boltdb/db.go
deleted file mode 100644
index 3d09a797d..000000000
--- a/pkg/util/locode/db/boltdb/db.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package locodebolt
-
-import (
- "fmt"
- "io/fs"
-
- "go.etcd.io/bbolt"
-)
-
-// Prm groups the required parameters of the DB's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Path to BoltDB file with FrostFS location database.
- //
- // Must not be empty.
- Path string
-}
-
-// DB is a descriptor of the FrostFS BoltDB location database.
-//
-// For correct operation, DB must be created
-// using the constructor (New) based on the required parameters
-// and optional components.
-//
-// After successful creation,
-// DB must be opened through Open call. After successful opening,
-// DB is ready to work through API (until Close call).
-//
-// Upon completion of work with the DB, it must be closed
-// by Close method.
-type DB struct {
- path string
-
- mode fs.FileMode
-
- boltOpts *bbolt.Options
-
- bolt *bbolt.DB
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the DB.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created DB requires calling the Open method in order
-// to initialize required resources.
-func New(prm Prm, opts ...Option) *DB {
- switch {
- case prm.Path == "":
- panicOnPrmValue("Path", prm.Path)
- }
-
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &DB{
- path: prm.Path,
- mode: o.mode,
- boltOpts: o.boltOpts,
- }
-}
diff --git a/pkg/util/locode/db/boltdb/opts.go b/pkg/util/locode/db/boltdb/opts.go
deleted file mode 100644
index db0cccd3a..000000000
--- a/pkg/util/locode/db/boltdb/opts.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package locodebolt
-
-import (
- "io/fs"
- "os"
- "time"
-
- "go.etcd.io/bbolt"
-)
-
-// Option sets an optional parameter of DB.
-type Option func(*options)
-
-type options struct {
- mode fs.FileMode
-
- boltOpts *bbolt.Options
-}
-
-func defaultOpts() *options {
- return &options{
- mode: os.ModePerm, // 0777
- boltOpts: &bbolt.Options{
- Timeout: 3 * time.Second,
- },
- }
-}
-
-// ReadOnly enables read-only mode of the DB.
-//
-// Do not call DB.Put method on instances with
-// this option: the behavior is undefined.
-func ReadOnly() Option {
- return func(o *options) {
- o.boltOpts.ReadOnly = true
- }
-}
diff --git a/pkg/util/locode/db/continent.go b/pkg/util/locode/db/continent.go
deleted file mode 100644
index 863af7b57..000000000
--- a/pkg/util/locode/db/continent.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package locodedb
-
-// Continent is an enumeration of Earth's continent.
-type Continent uint8
-
-const (
- // ContinentUnknown is an undefined Continent value.
- ContinentUnknown = iota
-
- // ContinentEurope corresponds to Europe.
- ContinentEurope
-
- // ContinentAfrica corresponds to Africa.
- ContinentAfrica
-
- // ContinentNorthAmerica corresponds to North America.
- ContinentNorthAmerica
-
- // ContinentSouthAmerica corresponds to South America.
- ContinentSouthAmerica
-
- // ContinentAsia corresponds to Asia.
- ContinentAsia
-
- // ContinentAntarctica corresponds to Antarctica.
- ContinentAntarctica
-
- // ContinentOceania corresponds to Oceania.
- ContinentOceania
-)
-
-// Is checks if c is the same continent as c2.
-func (c *Continent) Is(c2 Continent) bool {
- return *c == c2
-}
-
-func (c Continent) String() string {
- switch c {
- case ContinentUnknown:
- fallthrough
- default:
- return "Unknown"
- case ContinentEurope:
- return "Europe"
- case ContinentAfrica:
- return "Africa"
- case ContinentNorthAmerica:
- return "North America"
- case ContinentSouthAmerica:
- return "South America"
- case ContinentAsia:
- return "Asia"
- case ContinentAntarctica:
- return "Antarctica"
- case ContinentOceania:
- return "Oceania"
- }
-}
-
-// ContinentFromString returns Continent value
-// corresponding to the passed string representation.
-func ContinentFromString(str string) Continent {
- switch str {
- default:
- return ContinentUnknown
- case "Europe":
- return ContinentEurope
- case "Africa":
- return ContinentAfrica
- case "North America":
- return ContinentNorthAmerica
- case "South America":
- return ContinentSouthAmerica
- case "Asia":
- return ContinentAsia
- case "Antarctica":
- return ContinentAntarctica
- case "Oceania":
- return ContinentOceania
- }
-}
diff --git a/pkg/util/locode/db/continents/geojson/calls.go b/pkg/util/locode/db/continents/geojson/calls.go
deleted file mode 100644
index 34467d5a2..000000000
--- a/pkg/util/locode/db/continents/geojson/calls.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package continentsdb
-
-import (
- "fmt"
- "os"
-
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
- "github.com/paulmach/orb"
- "github.com/paulmach/orb/geojson"
- "github.com/paulmach/orb/planar"
-)
-
-const continentProperty = "Continent"
-
-// PointContinent goes through all polygons and returns the continent
-// in which the point is located.
-//
-// Returns locodedb.ContinentUnknown if no entry matches.
-//
-// All GeoJSON feature are parsed from file once and stored in memory.
-func (db *DB) PointContinent(point *locodedb.Point) (*locodedb.Continent, error) {
- var err error
-
- db.once.Do(func() {
- err = db.init()
- })
-
- if err != nil {
- return nil, err
- }
-
- planarPoint := orb.Point{point.Longitude(), point.Latitude()}
-
- var (
- continent string
- minDst float64
- )
-
- for _, feature := range db.features {
- if multiPolygon, ok := feature.Geometry.(orb.MultiPolygon); ok {
- if planar.MultiPolygonContains(multiPolygon, planarPoint) {
- continent = feature.Properties.MustString(continentProperty)
- break
- }
- } else if polygon, ok := feature.Geometry.(orb.Polygon); ok {
- if planar.PolygonContains(polygon, planarPoint) {
- continent = feature.Properties.MustString(continentProperty)
- break
- }
- }
- distance := planar.DistanceFrom(feature.Geometry, planarPoint)
- if minDst == 0 || minDst > distance {
- minDst = distance
- continent = feature.Properties.MustString(continentProperty)
- }
- }
-
- c := continentFromString(continent)
-
- return &c, nil
-}
-
-func (db *DB) init() error {
- data, err := os.ReadFile(db.path)
- if err != nil {
- return fmt.Errorf("could not read data file: %w", err)
- }
-
- features, err := geojson.UnmarshalFeatureCollection(data)
- if err != nil {
- return fmt.Errorf("could not unmarshal GeoJSON feature collection: %w", err)
- }
-
- db.features = features.Features
-
- return nil
-}
-
-func continentFromString(c string) locodedb.Continent {
- switch c {
- default:
- return locodedb.ContinentUnknown
- case "Africa":
- return locodedb.ContinentAfrica
- case "Asia":
- return locodedb.ContinentAsia
- case "Europe":
- return locodedb.ContinentEurope
- case "North America":
- return locodedb.ContinentNorthAmerica
- case "South America":
- return locodedb.ContinentSouthAmerica
- case "Antarctica":
- return locodedb.ContinentAntarctica
- case "Australia", "Oceania":
- return locodedb.ContinentOceania
- }
-}
diff --git a/pkg/util/locode/db/continents/geojson/db.go b/pkg/util/locode/db/continents/geojson/db.go
deleted file mode 100644
index ee43bd810..000000000
--- a/pkg/util/locode/db/continents/geojson/db.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package continentsdb
-
-import (
- "fmt"
- "sync"
-
- "github.com/paulmach/orb/geojson"
-)
-
-// Prm groups the required parameters of the DB's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Path to polygons of Earth's continents in GeoJSON format.
- //
- // Must not be empty.
- Path string
-}
-
-// DB is a descriptor of the Earth's polygons in GeoJSON format.
-//
-// For correct operation, DB must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// The DB is immediately ready to work through API.
-type DB struct {
- path string
-
- once sync.Once
-
- features []*geojson.Feature
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the DB.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created DB does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *DB {
- switch {
- case prm.Path == "":
- panicOnPrmValue("Path", prm.Path)
- }
-
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &DB{
- path: prm.Path,
- }
-}
diff --git a/pkg/util/locode/db/continents/geojson/opts.go b/pkg/util/locode/db/continents/geojson/opts.go
deleted file mode 100644
index 59831fcc5..000000000
--- a/pkg/util/locode/db/continents/geojson/opts.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package continentsdb
-
-// Option sets an optional parameter of DB.
-type Option func(*options)
-
-type options struct{}
-
-func defaultOpts() *options {
- return &options{}
-}
diff --git a/pkg/util/locode/db/country.go b/pkg/util/locode/db/country.go
deleted file mode 100644
index 2d13c6ef9..000000000
--- a/pkg/util/locode/db/country.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package locodedb
-
-import (
- "fmt"
-
- locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
-)
-
-// CountryCode represents a country code for
-// the storage in the FrostFS location database.
-type CountryCode locodecolumn.CountryCode
-
-// CountryCodeFromString parses a string UN/LOCODE country code
-// and returns a CountryCode.
-func CountryCodeFromString(s string) (*CountryCode, error) {
- cc, err := locodecolumn.CountryCodeFromString(s)
- if err != nil {
- return nil, fmt.Errorf("could not parse country code: %w", err)
- }
-
- return CountryFromColumn(cc)
-}
-
-// CountryFromColumn converts a UN/LOCODE country code to a CountryCode.
-func CountryFromColumn(cc *locodecolumn.CountryCode) (*CountryCode, error) {
- return (*CountryCode)(cc), nil
-}
-
-func (c *CountryCode) String() string {
- syms := (*locodecolumn.CountryCode)(c).Symbols()
- return string(syms[:])
-}
diff --git a/pkg/util/locode/db/db.go b/pkg/util/locode/db/db.go
deleted file mode 100644
index 8c71ea794..000000000
--- a/pkg/util/locode/db/db.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package locodedb
-
-import (
- "errors"
- "fmt"
- "runtime"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- "golang.org/x/sync/errgroup"
-)
-
-// SourceTable is an interface of the UN/LOCODE table.
-type SourceTable interface {
- // Must iterate over all entries of the table
- // and pass next entry to the handler.
- //
- // Must return handler's errors directly.
- IterateAll(func(locode.Record) error) error
-}
-
-// DB is an interface of FrostFS location database.
-type DB interface {
- // Must save the record by key in the database.
- Put(Key, Record) error
-
- // Must return the record by key from the database.
- Get(Key) (*Record, error)
-}
-
-// AirportRecord represents the entry in FrostFS airport database.
-type AirportRecord struct {
- // Name of the country where airport is located.
- CountryName string
-
- // Geo point where airport is located.
- Point *Point
-}
-
-// ErrAirportNotFound is returned by AirportRecord readers
-// when the required airport is not found.
-var ErrAirportNotFound = errors.New("airport not found")
-
-// AirportDB is an interface of FrostFS airport database.
-type AirportDB interface {
- // Must return the record by UN/LOCODE table record.
- //
- // Must return ErrAirportNotFound if there is no
- // related airport in the database.
- Get(locode.Record) (*AirportRecord, error)
-}
-
-// ContinentsDB is an interface of FrostFS continent database.
-type ContinentsDB interface {
- // Must return continent of the geo point.
- PointContinent(*Point) (*Continent, error)
-}
-
-var ErrSubDivNotFound = errors.New("subdivision not found")
-
-var ErrCountryNotFound = errors.New("country not found")
-
-// NamesDB is an interface of the FrostFS location namespace.
-type NamesDB interface {
- // Must resolve a country code to a country name.
- //
- // Must return ErrCountryNotFound if there is no
- // country with the provided code.
- CountryName(*CountryCode) (string, error)
-
- // Must resolve (country code, subdivision code) to
- // a subdivision name.
- //
- // Must return ErrSubDivNotFound if either country or
- // subdivision is not presented in database.
- SubDivName(*CountryCode, string) (string, error)
-}
-
-// FillDatabase generates the FrostFS location database based on the UN/LOCODE table.
-func FillDatabase(table SourceTable, airports AirportDB, continents ContinentsDB, names NamesDB, db DB) error {
- var errG errgroup.Group
-
- // Pick some sane default, after this the performance stopped increasing.
- errG.SetLimit(runtime.NumCPU() * 4)
- _ = table.IterateAll(func(tableRecord locode.Record) error {
- errG.Go(func() error {
- return processTableRecord(tableRecord, airports, continents, names, db)
- })
- return nil
- })
- return errG.Wait()
-}
-
-func processTableRecord(tableRecord locode.Record, airports AirportDB, continents ContinentsDB, names NamesDB, db DB) error {
- if tableRecord.LOCODE.LocationCode() == "" {
- return nil
- }
-
- dbKey, err := NewKey(tableRecord.LOCODE)
- if err != nil {
- return err
- }
-
- dbRecord, err := NewRecord(tableRecord)
- if err != nil {
- if errors.Is(err, errParseCoordinates) {
- return nil
- }
-
- return err
- }
-
- geoPoint := dbRecord.GeoPoint()
- countryName := ""
-
- if geoPoint == nil {
- airportRecord, err := airports.Get(tableRecord)
- if err != nil {
- if errors.Is(err, ErrAirportNotFound) {
- return nil
- }
-
- return err
- }
-
- geoPoint = airportRecord.Point
- countryName = airportRecord.CountryName
- }
-
- dbRecord.SetGeoPoint(geoPoint)
-
- if countryName == "" {
- countryName, err = names.CountryName(dbKey.CountryCode())
- if err != nil {
- if errors.Is(err, ErrCountryNotFound) {
- return nil
- }
-
- return err
- }
- }
-
- dbRecord.SetCountryName(countryName)
-
- if subDivCode := dbRecord.SubDivCode(); subDivCode != "" {
- subDivName, err := names.SubDivName(dbKey.CountryCode(), subDivCode)
- if err != nil {
- if errors.Is(err, ErrSubDivNotFound) {
- return nil
- }
-
- return err
- }
-
- dbRecord.SetSubDivName(subDivName)
- }
-
- continent, err := continents.PointContinent(geoPoint)
- if err != nil {
- return fmt.Errorf("could not calculate continent geo point: %w", err)
- } else if continent.Is(ContinentUnknown) {
- return nil
- }
-
- dbRecord.SetContinent(continent)
-
- return db.Put(*dbKey, *dbRecord)
-}
-
-// LocodeRecord returns the record from the FrostFS location database
-// corresponding to the string representation of UN/LOCODE.
-func LocodeRecord(db DB, sLocode string) (*Record, error) {
- lc, err := locode.FromString(sLocode)
- if err != nil {
- return nil, fmt.Errorf("could not parse locode: %w", err)
- }
-
- key, err := NewKey(*lc)
- if err != nil {
- return nil, err
- }
-
- return db.Get(*key)
-}
diff --git a/pkg/util/locode/db/location.go b/pkg/util/locode/db/location.go
deleted file mode 100644
index d22979170..000000000
--- a/pkg/util/locode/db/location.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package locodedb
-
-import (
- "fmt"
-
- locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
-)
-
-// LocationCode represents a location code for
-// the storage in the FrostFS location database.
-type LocationCode locodecolumn.LocationCode
-
-// LocationCodeFromString parses a string UN/LOCODE location code
-// and returns a LocationCode.
-func LocationCodeFromString(s string) (*LocationCode, error) {
- lc, err := locodecolumn.LocationCodeFromString(s)
- if err != nil {
- return nil, fmt.Errorf("could not parse location code: %w", err)
- }
-
- return LocationFromColumn(lc)
-}
-
-// LocationFromColumn converts a UN/LOCODE country code to a LocationCode.
-func LocationFromColumn(cc *locodecolumn.LocationCode) (*LocationCode, error) {
- return (*LocationCode)(cc), nil
-}
-
-func (l *LocationCode) String() string {
- syms := (*locodecolumn.LocationCode)(l).Symbols()
- return string(syms[:])
-}
diff --git a/pkg/util/locode/db/point.go b/pkg/util/locode/db/point.go
deleted file mode 100644
index 72daebb2c..000000000
--- a/pkg/util/locode/db/point.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package locodedb
-
-import (
- "fmt"
- "strconv"
-
- locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
-)
-
-// Point represents a 2D geographic point.
-type Point struct {
- lat, lng float64
-}
-
-// NewPoint creates, initializes and returns a new Point.
-func NewPoint(lat, lng float64) *Point {
- return &Point{
- lat: lat,
- lng: lng,
- }
-}
-
-// Latitude returns the Point's latitude.
-func (p Point) Latitude() float64 {
- return p.lat
-}
-
-// Longitude returns the Point's longitude.
-func (p Point) Longitude() float64 {
- return p.lng
-}
-
-// PointFromCoordinates converts a UN/LOCODE coordinates to a Point.
-func PointFromCoordinates(crd *locodecolumn.Coordinates) (*Point, error) {
- if crd == nil {
- return nil, nil
- }
-
- cLat := crd.Latitude()
- cLatDeg := cLat.Degrees()
- cLatMnt := cLat.Minutes()
-
- lat, err := toDecimal(cLatDeg[:], cLatMnt[:])
- if err != nil {
- return nil, fmt.Errorf("could not parse latitude: %w", err)
- }
-
- if !cLat.Hemisphere().North() {
- lat = -lat
- }
-
- cLng := crd.Longitude()
- cLngDeg := cLng.Degrees()
- cLngMnt := cLng.Minutes()
-
- lng, err := toDecimal(cLngDeg[:], cLngMnt[:])
- if err != nil {
- return nil, fmt.Errorf("could not parse longitude: %w", err)
- }
-
- if !cLng.Hemisphere().East() {
- lng = -lng
- }
-
- return &Point{
- lat: lat,
- lng: lng,
- }, nil
-}
-
-func toDecimal(intRaw, minutesRaw []byte) (float64, error) {
- integer, err := strconv.ParseFloat(string(intRaw), 64)
- if err != nil {
- return 0, fmt.Errorf("could not parse integer part: %w", err)
- }
-
- decimal, err := minutesToDegrees(minutesRaw)
- if err != nil {
- return 0, fmt.Errorf("could not parse decimal part: %w", err)
- }
-
- return integer + decimal, nil
-}
-
-// minutesToDegrees converts minutes to decimal part of a degree.
-func minutesToDegrees(raw []byte) (float64, error) {
- minutes, err := strconv.ParseFloat(string(raw), 64)
- if err != nil {
- return 0, err
- }
-
- return minutes / 60, nil
-}
diff --git a/pkg/util/locode/db/point_test.go b/pkg/util/locode/db/point_test.go
deleted file mode 100644
index f91c0cf87..000000000
--- a/pkg/util/locode/db/point_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package locodedb
-
-import (
- "testing"
-
- locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
- "github.com/stretchr/testify/require"
-)
-
-func TestPointFromCoordinates(t *testing.T) {
- testCases := []struct {
- latGot, longGot string
- latWant, longWant float64
- }{
- {
- latGot: "5915N",
- longGot: "01806E",
- latWant: 59.25,
- longWant: 18.10,
- },
- {
- latGot: "1000N",
- longGot: "02030E",
- latWant: 10.00,
- longWant: 20.50,
- },
- {
- latGot: "0145S",
- longGot: "03512W",
- latWant: -01.75,
- longWant: -35.20,
- },
- }
-
- var (
- crd *locodecolumn.Coordinates
- point *Point
- err error
- )
-
- for _, test := range testCases {
- crd, err = locodecolumn.CoordinatesFromString(test.latGot + " " + test.longGot)
- require.NoError(t, err)
-
- point, err = PointFromCoordinates(crd)
- require.NoError(t, err)
-
- require.Equal(t, test.latWant, point.Latitude())
- require.Equal(t, test.longWant, point.Longitude())
- }
-}
diff --git a/pkg/util/locode/db/record.go b/pkg/util/locode/db/record.go
deleted file mode 100644
index 4c414079f..000000000
--- a/pkg/util/locode/db/record.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package locodedb
-
-import (
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodecolumn "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/column"
-)
-
-// Key represents the key in FrostFS location database.
-type Key struct {
- cc *CountryCode
-
- lc *LocationCode
-}
-
-// NewKey calculates Key from LOCODE.
-func NewKey(lc locode.LOCODE) (*Key, error) {
- country, err := CountryCodeFromString(lc.CountryCode())
- if err != nil {
- return nil, fmt.Errorf("could not parse country: %w", err)
- }
-
- location, err := LocationCodeFromString(lc.LocationCode())
- if err != nil {
- return nil, fmt.Errorf("could not parse location: %w", err)
- }
-
- return &Key{
- cc: country,
- lc: location,
- }, nil
-}
-
-// CountryCode returns the location's country code.
-func (k *Key) CountryCode() *CountryCode {
- return k.cc
-}
-
-// LocationCode returns the location code.
-func (k *Key) LocationCode() *LocationCode {
- return k.lc
-}
-
-// Record represents the entry in FrostFS location database.
-type Record struct {
- countryName string
-
- locationName string
-
- subDivName string
-
- subDivCode string
-
- p *Point
-
- cont *Continent
-}
-
-var errParseCoordinates = errors.New("invalid coordinates")
-
-// NewRecord calculates the Record from the UN/LOCODE table record.
-func NewRecord(r locode.Record) (*Record, error) {
- crd, err := locodecolumn.CoordinatesFromString(r.Coordinates)
- if err != nil {
- return nil, fmt.Errorf("%w: %v", errParseCoordinates, err)
- }
-
- point, err := PointFromCoordinates(crd)
- if err != nil {
- return nil, fmt.Errorf("could not parse geo point: %w", err)
- }
-
- return &Record{
- locationName: r.NameWoDiacritics,
- subDivCode: r.SubDiv,
- p: point,
- }, nil
-}
-
-// CountryName returns the country name.
-func (r *Record) CountryName() string {
- return r.countryName
-}
-
-// SetCountryName sets the country name.
-func (r *Record) SetCountryName(name string) {
- r.countryName = name
-}
-
-// LocationName returns the location name.
-func (r *Record) LocationName() string {
- return r.locationName
-}
-
-// SetLocationName sets the location name.
-func (r *Record) SetLocationName(name string) {
- r.locationName = name
-}
-
-// SubDivCode returns the subdivision code.
-func (r *Record) SubDivCode() string {
- return r.subDivCode
-}
-
-// SetSubDivCode sets the subdivision code.
-func (r *Record) SetSubDivCode(name string) {
- r.subDivCode = name
-}
-
-// SubDivName returns the subdivision name.
-func (r *Record) SubDivName() string {
- return r.subDivName
-}
-
-// SetSubDivName sets the subdivision name.
-func (r *Record) SetSubDivName(name string) {
- r.subDivName = name
-}
-
-// GeoPoint returns geo point of the location.
-func (r *Record) GeoPoint() *Point {
- return r.p
-}
-
-// SetGeoPoint sets geo point of the location.
-func (r *Record) SetGeoPoint(p *Point) {
- r.p = p
-}
-
-// Continent returns the location continent.
-func (r *Record) Continent() *Continent {
- return r.cont
-}
-
-// SetContinent sets the location continent.
-func (r *Record) SetContinent(c *Continent) {
- r.cont = c
-}
diff --git a/pkg/util/locode/record.go b/pkg/util/locode/record.go
deleted file mode 100644
index 7db746ff3..000000000
--- a/pkg/util/locode/record.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package locode
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-// LOCODE represents code from UN/LOCODE coding scheme.
-type LOCODE [2]string
-
-// Record represents a single record of the UN/LOCODE table.
-type Record struct {
- // Change Indicator.
- Ch string
-
- // Combination of a 2-character country code and a 3-character location code.
- LOCODE LOCODE
-
- // Name of the locations which has been allocated a UN/LOCODE.
- Name string
-
- // Names of the locations which have been allocated a UN/LOCODE without diacritic signs.
- NameWoDiacritics string
-
- // ISO 1-3 character alphabetic and/or numeric code for the administrative division of the country concerned.
- SubDiv string
-
- // 8-digit function classifier code for the location.
- Function string
-
- // Status of the entry by a 2-character code.
- Status string
-
- // Last date when the location was updated/entered.
- Date string
-
- // The IATA code for the location if different from location code in column LOCODE.
- IATA string
-
- // Geographical coordinates (latitude/longitude) of the location, if there is any.
- Coordinates string
-
- // Some general remarks regarding the UN/LOCODE in question.
- Remarks string
-}
-
-// ErrInvalidString is the error of incorrect string format of the LOCODE.
-var ErrInvalidString = errors.New("invalid string format in UN/Locode")
-
-// FromString parses string and returns LOCODE.
-//
-// If string has incorrect format, ErrInvalidString returns.
-func FromString(s string) (*LOCODE, error) {
- const (
- locationSeparator = " "
- locodePartsNumber = 2
- )
-
- words := strings.Split(s, locationSeparator)
- if ln := len(words); ln != locodePartsNumber {
- return nil, fmt.Errorf(
- "incorrect locode: it must consist of %d codes separated with a witespase, got: %d",
- locodePartsNumber,
- ln,
- )
- }
-
- l := new(LOCODE)
- copy(l[:], words)
-
- return l, nil
-}
-
-// CountryCode returns a string representation of country code.
-func (l *LOCODE) CountryCode() string {
- return l[0]
-}
-
-// LocationCode returns a string representation of location code.
-func (l *LOCODE) LocationCode() string {
- return l[1]
-}
diff --git a/pkg/util/locode/table/csv/calls.go b/pkg/util/locode/table/csv/calls.go
deleted file mode 100644
index 5f40865be..000000000
--- a/pkg/util/locode/table/csv/calls.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package csvlocode
-
-import (
- "encoding/csv"
- "errors"
- "io"
- "os"
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode"
- locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
-)
-
-var errInvalidRecord = errors.New("invalid table record")
-
-// IterateAll scans a table record one-by-one, parses a UN/LOCODE record
-// from it and passes it to f.
-//
-// Returns f's errors directly.
-func (t *Table) IterateAll(f func(locode.Record) error) error {
- const wordsPerRecord = 12
-
- return t.scanWords(t.paths, wordsPerRecord, func(words []string) error {
- lc, err := locode.FromString(strings.Join(words[1:3], " "))
- if err != nil {
- return err
- }
-
- record := locode.Record{
- Ch: words[0],
- LOCODE: *lc,
- Name: words[3],
- NameWoDiacritics: words[4],
- SubDiv: words[5],
- Function: words[6],
- Status: words[7],
- Date: words[8],
- IATA: words[9],
- Coordinates: words[10],
- Remarks: words[11],
- }
-
- return f(record)
- })
-}
-
-const (
- _ = iota - 1
-
- subDivCountry
- subDivSubdivision
- subDivName
- _ // subDivLevel
-
- subDivFldNum
-)
-
-type subDivKey struct {
- countryCode,
- subDivCode string
-}
-
-type subDivRecord struct {
- name string
-}
-
-// SubDivName scans a table record to an in-memory table (once),
-// and returns the subdivision name of the country and the subdivision codes match.
-//
-// Returns locodedb.ErrSubDivNotFound if no entry matches.
-func (t *Table) SubDivName(countryCode *locodedb.CountryCode, code string) (string, error) {
- if err := t.initSubDiv(); err != nil {
- return "", err
- }
-
- rec, ok := t.mSubDiv[subDivKey{
- countryCode: countryCode.String(),
- subDivCode: code,
- }]
- if !ok {
- return "", locodedb.ErrSubDivNotFound
- }
-
- return rec.name, nil
-}
-
-func (t *Table) initSubDiv() (err error) {
- t.subDivOnce.Do(func() {
- t.mSubDiv = make(map[subDivKey]subDivRecord)
-
- err = t.scanWords([]string{t.subDivPath}, subDivFldNum, func(words []string) error {
- t.mSubDiv[subDivKey{
- countryCode: words[subDivCountry],
- subDivCode: words[subDivSubdivision],
- }] = subDivRecord{
- name: words[subDivName],
- }
-
- return nil
- })
- })
-
- return
-}
-
-var errScanInt = errors.New("interrupt scan")
-
-func (t *Table) scanWords(paths []string, fpr int, wordsHandler func([]string) error) error {
- var (
- rdrs = make([]io.Reader, 0, len(t.paths))
- closers = make([]io.Closer, 0, len(t.paths))
- )
-
- for i := range paths {
- file, err := os.OpenFile(paths[i], os.O_RDONLY, t.mode)
- if err != nil {
- return err
- }
-
- rdrs = append(rdrs, file)
- closers = append(closers, file)
- }
-
- defer func() {
- for i := range closers {
- _ = closers[i].Close()
- }
- }()
-
- r := csv.NewReader(io.MultiReader(rdrs...))
- r.ReuseRecord = true
- r.FieldsPerRecord = fpr
-
- for {
- words, err := r.Read()
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
-
- return err
- } else if len(words) != fpr {
- return errInvalidRecord
- }
-
- if err := wordsHandler(words); err != nil {
- if errors.Is(err, errScanInt) {
- break
- }
-
- return err
- }
- }
-
- return nil
-}
diff --git a/pkg/util/locode/table/csv/opts.go b/pkg/util/locode/table/csv/opts.go
deleted file mode 100644
index 68e442899..000000000
--- a/pkg/util/locode/table/csv/opts.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package csvlocode
-
-import (
- "io/fs"
-)
-
-// Option sets an optional parameter of Table.
-type Option func(*options)
-
-type options struct {
- mode fs.FileMode
-
- extraPaths []string
-}
-
-func defaultOpts() *options {
- return &options{
- mode: 0o700,
- }
-}
-
-// WithExtraPaths returns an option to add extra paths
-// to UN/LOCODE tables in csv format.
-func WithExtraPaths(ps ...string) Option {
- return func(o *options) {
- o.extraPaths = append(o.extraPaths, ps...)
- }
-}
diff --git a/pkg/util/locode/table/csv/table.go b/pkg/util/locode/table/csv/table.go
deleted file mode 100644
index b84c2b705..000000000
--- a/pkg/util/locode/table/csv/table.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package csvlocode
-
-import (
- "fmt"
- "io/fs"
- "sync"
-)
-
-// Prm groups the required parameters of the Table's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Path to UN/LOCODE csv table.
- //
- // Must not be empty.
- Path string
-
- // Path to csv table of UN/LOCODE Subdivisions.
- //
- // Must not be empty.
- SubDivPath string
-}
-
-// Table is a descriptor of the UN/LOCODE table in csv format.
-//
-// For correct operation, Table must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// The Table is immediately ready to work through API.
-type Table struct {
- paths []string
-
- mode fs.FileMode
-
- subDivPath string
-
- subDivOnce sync.Once
-
- mSubDiv map[subDivKey]subDivRecord
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Table.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Table does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *Table {
- switch {
- case prm.Path == "":
- panicOnPrmValue("Path", prm.Path)
- case prm.SubDivPath == "":
- panicOnPrmValue("SubDivPath", prm.SubDivPath)
- }
-
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &Table{
- paths: append(o.extraPaths, prm.Path),
- mode: o.mode,
- subDivPath: prm.SubDivPath,
- }
-}
From 8398a8b4b3d4eff5f3759a5d5dafaa4ba9046a4c Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 23 Jul 2024 17:46:02 +0300
Subject: [PATCH 007/705] [#1271] getSvc: Fix `head --raw` assemble for EC
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/assembleec.go | 4 +-
pkg/services/object/get/assemblerec.go | 64 --------------------------
pkg/services/object/get/get.go | 4 ++
pkg/services/object/get/remote.go | 18 ++++++--
4 files changed, 20 insertions(+), 70 deletions(-)
diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go
index 7bbd9ca1e..a58602bf7 100644
--- a/pkg/services/object/get/assembleec.go
+++ b/pkg/services/object/get/assembleec.go
@@ -11,7 +11,7 @@ import (
)
func (r *request) assembleEC(ctx context.Context) {
- if r.isRaw() && r.isLocal() {
+ if r.isRaw() {
r.log.Debug(logs.GetCanNotAssembleTheObject)
return
}
@@ -43,7 +43,7 @@ func (r *request) assembleEC(ctx context.Context) {
}
r.prm.common = r.prm.common.WithLocalOnly(false)
- assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.containerSource, r.log, r.headOnly(), r.isRaw(), r.traverserGenerator, r.curProcEpoch)
+ assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.containerSource, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
r.log.Debug(logs.GetAssemblingECObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index a4021ee5e..d64984d5c 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -37,7 +37,6 @@ type assemblerec struct {
cs container.Source
log *logger.Logger
head bool
- raw bool
traverserGenerator traverserGenerator
epoch uint64
}
@@ -51,7 +50,6 @@ func newAssemblerEC(
cs container.Source,
log *logger.Logger,
head bool,
- raw bool,
tg traverserGenerator,
epoch uint64,
) *assemblerec {
@@ -64,7 +62,6 @@ func newAssemblerEC(
cs: cs,
log: log,
head: head,
- raw: raw,
traverserGenerator: tg,
epoch: epoch,
}
@@ -74,9 +71,6 @@ func newAssemblerEC(
// It returns parent object.
func (a *assemblerec) Assemble(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
switch {
- case a.raw:
- err := a.reconstructRawError(ctx)
- return nil, err
case a.head:
return a.reconstructHeader(ctx, writer)
case a.rng != nil:
@@ -149,56 +143,6 @@ func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bo
return c.Reconstruct(parts)
}
-func (a *assemblerec) reconstructRawError(ctx context.Context) error {
- chunks := make(map[string]objectSDK.ECChunk)
- var chunksGuard sync.Mutex
- for _, ch := range a.ecInfo.localChunks {
- chunks[string(ch.ID.GetValue())] = ch
- }
-
- objID := a.addr.Object()
- trav, _, err := a.traverserGenerator.GenerateTraverser(a.addr.Container(), &objID, a.epoch)
- if err != nil {
- return err
- }
-
- eg, ctx := errgroup.WithContext(ctx)
- for {
- batch := trav.Next()
- if len(batch) == 0 {
- break
- }
- for _, node := range batch {
- var info client.NodeInfo
- client.NodeInfoFromNetmapElement(&info, node)
- eg.Go(func() error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- if _, found := a.ecInfo.remoteChunks[string(info.PublicKey())]; found {
- return nil
- }
-
- nodeChunks := a.tryGetChunkListFromNode(ctx, info)
-
- chunksGuard.Lock()
- defer chunksGuard.Unlock()
- for _, ch := range nodeChunks {
- chunks[string(ch.ID.GetValue())] = ch
- }
- return nil
- })
- }
- }
- if err = eg.Wait(); err != nil {
- return err
- }
- return createECInfoErr(chunks)
-}
-
func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Traverser, cnr *container.Container) []*objectSDK.Object {
dataCount := policy.ECDataCount(cnr.Value.PlacementPolicy())
parityCount := policy.ECParityCount(cnr.Value.PlacementPolicy())
@@ -359,11 +303,3 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli
}
return object
}
-
-func createECInfoErr(chunks map[string]objectSDK.ECChunk) *objectSDK.ECInfoError {
- info := objectSDK.NewECInfo()
- for _, ch := range chunks {
- info.AddChunk(ch)
- }
- return objectSDK.NewECInfoError(info)
-}
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index 5a57bc56e..07a2f3a72 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -111,6 +111,10 @@ func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) {
exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds)
case statusEC:
exec.log.Debug(logs.GetRequestedObjectIsEC)
+ if exec.isRaw() && execCnr {
+ exec.executeOnContainer(ctx)
+ exec.analyzeStatus(ctx, false)
+ }
exec.assembleEC(ctx)
default:
exec.log.Debug(logs.OperationFinishedWithError,
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index 4dee15242..ce9abfe1c 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -35,8 +35,12 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
switch {
default:
r.log.Debug(logs.GetRemoteCallFailed, zap.Error(err))
- r.status = statusUndefined
- r.err = new(apistatus.ObjectNotFound)
+ if r.status != statusEC {
+ // for raw requests, continue to collect other parts
+ r.status = statusUndefined
+ r.err = new(apistatus.ObjectNotFound)
+ }
+ return false
case err == nil:
r.status = statusOK
r.err = nil
@@ -48,22 +52,28 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
r.collectedObject = obj
r.writeCollectedObject(ctx)
}
+ return true
case errors.As(err, &errRemoved):
r.status = statusINHUMED
r.err = errRemoved
+ return true
case errors.As(err, &errOutOfRange):
r.status = statusOutOfRange
r.err = errOutOfRange
+ return true
case errors.As(err, &errSplitInfo):
r.status = statusVIRTUAL
mergeSplitInfo(r.splitInfo(), errSplitInfo.SplitInfo())
r.err = objectSDK.NewSplitInfoError(r.infoSplit)
+ return true
case errors.As(err, &errECInfo):
r.status = statusEC
r.err = r.infoEC.addRemote(string(info.PublicKey()), errECInfo.ECInfo())
+ if r.isRaw() {
+ return false // continue to collect all parts
+ }
+ return true
}
-
- return r.status != statusUndefined
}
func (r *request) getRemote(ctx context.Context, rs remoteStorage, info client.NodeInfo) (*objectSDK.Object, error) {
From 7fd7961dfa9ffef87f17382da758c4d74b54cd4f Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 23 Jul 2024 22:15:13 +0300
Subject: [PATCH 008/705] [#1271] getSvc: Fix local EC chunk get
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/assemblerec.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index d64984d5c..6a02673c3 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -237,7 +237,7 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object
return nil
}
var addr oid.Address
- addr.SetContainer(addr.Container())
+ addr.SetContainer(a.addr.Container())
addr.SetObject(objID)
var object *objectSDK.Object
if a.head {
From dd459d399f11d8e49fa841f1372563453bc41a77 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Thu, 25 Jul 2024 22:52:48 +0300
Subject: [PATCH 009/705] [#1274] go.mod: Update neo-go version that fixes
ws-client
* Update go.mod;
* This neo-go package version contains fix for the wsclient that
allows to morph event listener refresh the invalidated websocket
connection to neo-go.
Signed-off-by: Airat Arifullin
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 6a97f7850..c63eabe0d 100644
--- a/go.mod
+++ b/go.mod
@@ -127,4 +127,4 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect
)
-replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240611123832-594f716b3d18
+replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928
diff --git a/go.sum b/go.sum
index d4cf863fa..965bffd2d 100644
--- a/go.sum
+++ b/go.sum
@@ -14,8 +14,8 @@ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240617140730-1a5886e776de
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240617140730-1a5886e776de/go.mod h1:4AObM67VUqkXQJlODTFThFnuMGEuK8h9DrAXHDZqvCU=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
-git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240611123832-594f716b3d18 h1:JRjwcHaQajTbSCBCK3yZnqvyHvgWBaoThDGuT4kvIIc=
-git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240611123832-594f716b3d18/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
+git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
+git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240712081403-2628f6184984 h1:O3F2Grz07RWZ68mRz1xsYsNPNvZLwY00BM+xoYb1kNk=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240712081403-2628f6184984/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
From 8377372a40a699a029f185be6e355748931e8234 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 26 Jul 2024 16:37:05 +0300
Subject: [PATCH 010/705] [#1276] go.mod: Update api-go
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-cli/internal/client/client.go | 23 ----
cmd/frostfs-cli/modules/container/root.go | 3 -
cmd/frostfs-cli/modules/container/set_eacl.go | 108 ------------------
cmd/frostfs-cli/modules/container/util.go | 5 +-
go.mod | 6 +-
go.sum | 8 +-
pkg/core/client/client.go | 1 -
pkg/morph/client/container/load.go | 40 -------
pkg/network/cache/multi.go | 9 --
.../transport/container/grpc/service.go | 30 -----
pkg/services/container/ape.go | 21 ----
pkg/services/container/ape_test.go | 90 ---------------
pkg/services/container/audit.go | 30 -----
pkg/services/container/executor.go | 19 ---
pkg/services/container/morph/executor.go | 6 -
pkg/services/container/server.go | 2 -
pkg/services/container/sign.go | 18 ---
17 files changed, 9 insertions(+), 410 deletions(-)
delete mode 100644 cmd/frostfs-cli/modules/container/set_eacl.go
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
index a6d9968c5..215490dbe 100644
--- a/cmd/frostfs-cli/internal/client/client.go
+++ b/cmd/frostfs-cli/internal/client/client.go
@@ -214,29 +214,6 @@ func EACL(ctx context.Context, prm EACLPrm) (res EACLRes, err error) {
return
}
-// SetEACLPrm groups parameters of SetEACL operation.
-type SetEACLPrm struct {
- Client *client.Client
- ClientParams client.PrmContainerSetEACL
-}
-
-// SetEACLRes groups the resulting values of SetEACL operation.
-type SetEACLRes struct{}
-
-// SetEACL requests to save an eACL table in FrostFS.
-//
-// Operation is asynchronous and no guaranteed even in the absence of errors.
-// The required time is also not predictable.
-//
-// Success can be verified by reading by container identifier.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func SetEACL(ctx context.Context, prm SetEACLPrm) (res SetEACLRes, err error) {
- _, err = prm.Client.ContainerSetEACL(ctx, prm.ClientParams)
-
- return
-}
-
// NetworkInfoPrm groups parameters of NetworkInfo operation.
type NetworkInfoPrm struct {
Client *client.Client
diff --git a/cmd/frostfs-cli/modules/container/root.go b/cmd/frostfs-cli/modules/container/root.go
index 99d1a4231..d5f0fd776 100644
--- a/cmd/frostfs-cli/modules/container/root.go
+++ b/cmd/frostfs-cli/modules/container/root.go
@@ -26,7 +26,6 @@ func init() {
listContainerObjectsCmd,
getContainerInfoCmd,
getExtendedACLCmd,
- setExtendedACLCmd,
containerNodesCmd,
policyPlaygroundCmd,
}
@@ -39,7 +38,6 @@ func init() {
initContainerListObjectsCmd()
initContainerInfoCmd()
initContainerGetEACLCmd()
- initContainerSetEACLCmd()
initContainerNodesCmd()
initContainerPolicyPlaygroundCmd()
@@ -53,7 +51,6 @@ func init() {
}{
{createContainerCmd, "PUT"},
{deleteContainerCmd, "DELETE"},
- {setExtendedACLCmd, "SETEACL"},
} {
commonflags.InitSession(el.cmd, "container "+el.verb)
}
diff --git a/cmd/frostfs-cli/modules/container/set_eacl.go b/cmd/frostfs-cli/modules/container/set_eacl.go
deleted file mode 100644
index 86aa50a57..000000000
--- a/cmd/frostfs-cli/modules/container/set_eacl.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package container
-
-import (
- "bytes"
- "errors"
- "time"
-
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "github.com/spf13/cobra"
-)
-
-var flagVarsSetEACL struct {
- noPreCheck bool
-
- srcPath string
-}
-
-var setExtendedACLCmd = &cobra.Command{
- Use: "set-eacl",
- Short: "Set new extended ACL table for container",
- Long: `Set new extended ACL table for container.
-Container ID in EACL table will be substituted with ID from the CLI.`,
- Run: func(cmd *cobra.Command, _ []string) {
- id := parseContainerID(cmd)
- eaclTable := common.ReadEACL(cmd, flagVarsSetEACL.srcPath)
-
- tok := getSession(cmd)
-
- eaclTable.SetCID(id)
-
- pk := key.GetOrGenerate(cmd)
- cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
-
- if !flagVarsSetEACL.noPreCheck {
- cmd.Println("Checking the ability to modify access rights in the container...")
-
- extendable, err := internalclient.IsACLExtendable(cmd.Context(), cli, id)
- commonCmd.ExitOnErr(cmd, "Extensibility check failure: %w", err)
-
- if !extendable {
- commonCmd.ExitOnErr(cmd, "", errors.New("container ACL is immutable"))
- }
-
- cmd.Println("ACL extension is enabled in the container, continue processing.")
- }
-
- setEACLPrm := internalclient.SetEACLPrm{
- Client: cli,
- ClientParams: client.PrmContainerSetEACL{
- Table: eaclTable,
- Session: tok,
- },
- }
-
- _, err := internalclient.SetEACL(cmd.Context(), setEACLPrm)
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- if containerAwait {
- exp, err := eaclTable.Marshal()
- commonCmd.ExitOnErr(cmd, "broken EACL table: %w", err)
-
- cmd.Println("awaiting...")
-
- getEACLPrm := internalclient.EACLPrm{
- Client: cli,
- ClientParams: client.PrmContainerEACL{
- ContainerID: &id,
- },
- }
-
- for i := 0; i < awaitTimeout; i++ {
- time.Sleep(1 * time.Second)
-
- res, err := internalclient.EACL(cmd.Context(), getEACLPrm)
- if err == nil {
- // compare binary values because EACL could have been set already
- table := res.EACL()
- got, err := table.Marshal()
- if err != nil {
- continue
- }
-
- if bytes.Equal(exp, got) {
- cmd.Println("EACL has been persisted on sidechain")
- return
- }
- }
- }
-
- commonCmd.ExitOnErr(cmd, "", errSetEACLTimeout)
- }
- },
-}
-
-func initContainerSetEACLCmd() {
- commonflags.Init(setExtendedACLCmd)
-
- flags := setExtendedACLCmd.Flags()
- flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
- flags.StringVar(&flagVarsSetEACL.srcPath, "table", "", "path to file with JSON or binary encoded EACL table")
- flags.BoolVar(&containerAwait, "await", false, "block execution until EACL is persisted")
- flags.BoolVar(&flagVarsSetEACL.noPreCheck, "no-precheck", false, "do not pre-check the extensibility of the container ACL")
-}
diff --git a/cmd/frostfs-cli/modules/container/util.go b/cmd/frostfs-cli/modules/container/util.go
index 48265f785..4cb268ec5 100644
--- a/cmd/frostfs-cli/modules/container/util.go
+++ b/cmd/frostfs-cli/modules/container/util.go
@@ -18,9 +18,8 @@ const (
)
var (
- errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain")
- errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain")
- errSetEACLTimeout = errors.New("timeout: EACL has not been persisted on sidechain")
+ errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain")
+ errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain")
)
func parseContainerID(cmd *cobra.Command) cid.ID {
diff --git a/go.mod b/go.mod
index c63eabe0d..09a098502 100644
--- a/go.mod
+++ b/go.mod
@@ -4,12 +4,12 @@ go 1.21
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240726072425-3dfa2f4fd65e
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240617140730-1a5886e776de
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240726111349-9da46f566fec
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240712081403-2628f6184984
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
@@ -24,7 +24,7 @@ require (
github.com/mitchellh/go-homedir v1.1.0
github.com/mr-tron/base58 v1.2.0
github.com/multiformats/go-multiaddr v0.12.1
- github.com/nspcc-dev/neo-go v0.106.0
+ github.com/nspcc-dev/neo-go v0.106.2
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0
github.com/prometheus/client_golang v1.19.0
diff --git a/go.sum b/go.sum
index 965bffd2d..1034ff61f 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3 h1:H5GvrVlowIMWfzqQkhY0p0myooJxQ1sMRVSFfXawwWg=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3/go.mod h1:OBDSr+DqV1z4VDouoX3YMleNc4DPBVBWTG3WDT2PK1o=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240726072425-3dfa2f4fd65e h1:gEWT+70E/RvGkxtSv+PlyUN2vtJVymhQa1mypvrXukM=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240726072425-3dfa2f4fd65e/go.mod h1:OBDSr+DqV1z4VDouoX3YMleNc4DPBVBWTG3WDT2PK1o=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240617140730-1a5886e776de h1:OjsWY0jpGJV1t87XgwL/3PsDx7fJ6lfNMXtY8UhoUbM=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240617140730-1a5886e776de/go.mod h1:4AObM67VUqkXQJlODTFThFnuMGEuK8h9DrAXHDZqvCU=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240726111349-9da46f566fec h1:A09Swh7yogmmiABUf7Ht6MTQXJ07MyGx4+ziUQNelec=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240726111349-9da46f566fec/go.mod h1:DlJmgV4/qkFkx2ab+YWznlMijiF2yZHnrJswJOB7XGs=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
diff --git a/pkg/core/client/client.go b/pkg/core/client/client.go
index 8c92901f2..854fbc49f 100644
--- a/pkg/core/client/client.go
+++ b/pkg/core/client/client.go
@@ -11,7 +11,6 @@ import (
// Client is an interface of FrostFS storage
// node's client.
type Client interface {
- ContainerAnnounceUsedSpace(context.Context, client.PrmAnnounceSpace) (*client.ResAnnounceSpace, error)
ObjectPutInit(context.Context, client.PrmObjectPutInit) (client.ObjectWriter, error)
ObjectPutSingle(context.Context, client.PrmObjectPutSingle) (*client.ResObjectPutSingle, error)
ObjectDelete(context.Context, client.PrmObjectDelete) (*client.ResObjectDelete, error)
diff --git a/pkg/morph/client/container/load.go b/pkg/morph/client/container/load.go
index b5263d7a6..5e2c3c2c3 100644
--- a/pkg/morph/client/container/load.go
+++ b/pkg/morph/client/container/load.go
@@ -1,53 +1,13 @@
package container
import (
- "crypto/sha256"
"fmt"
v2refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
-// AnnounceLoadPrm groups parameters of AnnounceLoad operation.
-type AnnounceLoadPrm struct {
- a container.SizeEstimation
- key []byte
-
- client.InvokePrmOptional
-}
-
-// SetAnnouncement sets announcement.
-func (a2 *AnnounceLoadPrm) SetAnnouncement(a container.SizeEstimation) {
- a2.a = a
-}
-
-// SetReporter sets public key of the reporter.
-func (a2 *AnnounceLoadPrm) SetReporter(key []byte) {
- a2.key = key
-}
-
-// AnnounceLoad saves container size estimation calculated by storage node
-// with key in FrostFS system through Container contract call.
-//
-// Returns any error encountered that caused the saving to interrupt.
-func (c *Client) AnnounceLoad(p AnnounceLoadPrm) error {
- binCnr := make([]byte, sha256.Size)
- p.a.Container().Encode(binCnr)
-
- prm := client.InvokePrm{}
- prm.SetMethod(putSizeMethod)
- prm.SetArgs(p.a.Epoch(), binCnr, p.a.Value(), p.key)
- prm.InvokePrmOptional = p.InvokePrmOptional
-
- _, err := c.client.Invoke(prm)
- if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", putSizeMethod, err)
- }
- return nil
-}
-
// EstimationID is an identity of container load estimation inside Container contract.
type EstimationID []byte
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index f19510d76..9305c143b 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -239,15 +239,6 @@ func (x *multiClient) ObjectPutSingle(ctx context.Context, p client.PrmObjectPut
return
}
-func (x *multiClient) ContainerAnnounceUsedSpace(ctx context.Context, prm client.PrmAnnounceSpace) (res *client.ResAnnounceSpace, err error) {
- err = x.iterateClients(ctx, func(c clientcore.Client) error {
- res, err = c.ContainerAnnounceUsedSpace(ctx, prm)
- return err
- })
-
- return
-}
-
func (x *multiClient) ObjectDelete(ctx context.Context, p client.PrmObjectDelete) (res *client.ResObjectDelete, err error) {
err = x.iterateClients(ctx, func(c clientcore.Client) error {
res, err = c.ObjectDelete(ctx, p)
diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go
index ed514d6d4..f0206dd5c 100644
--- a/pkg/network/transport/container/grpc/service.go
+++ b/pkg/network/transport/container/grpc/service.go
@@ -81,21 +81,6 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con
return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil
}
-// SetExtendedACL converts gRPC SetExtendedACLRequest message and passes it to internal Container service.
-func (s *Server) SetExtendedACL(ctx context.Context, req *containerGRPC.SetExtendedACLRequest) (*containerGRPC.SetExtendedACLResponse, error) {
- setEACLReq := new(container.SetExtendedACLRequest)
- if err := setEACLReq.FromGRPCMessage(req); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.SetExtendedACL(ctx, setEACLReq)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*containerGRPC.SetExtendedACLResponse), nil
-}
-
// GetExtendedACL converts gRPC GetExtendedACLRequest message and passes it to internal Container service.
func (s *Server) GetExtendedACL(ctx context.Context, req *containerGRPC.GetExtendedACLRequest) (*containerGRPC.GetExtendedACLResponse, error) {
getEACLReq := new(container.GetExtendedACLRequest)
@@ -110,18 +95,3 @@ func (s *Server) GetExtendedACL(ctx context.Context, req *containerGRPC.GetExten
return resp.ToGRPCMessage().(*containerGRPC.GetExtendedACLResponse), nil
}
-
-// AnnounceUsedSpace converts gRPC AnnounceUsedSpaceRequest message and passes it to internal Container service.
-func (s *Server) AnnounceUsedSpace(ctx context.Context, req *containerGRPC.AnnounceUsedSpaceRequest) (*containerGRPC.AnnounceUsedSpaceResponse, error) {
- announceReq := new(container.AnnounceUsedSpaceRequest)
- if err := announceReq.FromGRPCMessage(req); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.AnnounceUsedSpace(ctx, announceReq)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*containerGRPC.AnnounceUsedSpaceResponse), nil
-}
diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go
index 3ea591c6a..8fe4dd2d9 100644
--- a/pkg/services/container/ape.go
+++ b/pkg/services/container/ape.go
@@ -78,15 +78,6 @@ func NewAPEServer(router policyengine.ChainRouter, reader containers, ir ir, nm
}
}
-func (ac *apeChecker) AnnounceUsedSpace(ctx context.Context, req *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.AnnounceUsedSpace")
- defer span.End()
-
- // this method is not used, so not checked
-
- return ac.next.AnnounceUsedSpace(ctx, req)
-}
-
func (ac *apeChecker) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Delete")
defer span.End()
@@ -303,18 +294,6 @@ func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.R
return nativeschema.PropertyValueContainerRoleOthers, pk, nil
}
-func (ac *apeChecker) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.SetExtendedACL")
- defer span.End()
-
- if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetEACL().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(),
- nativeschema.MethodSetContainerEACL); err != nil {
- return nil, err
- }
-
- return ac.next.SetExtendedACL(ctx, req)
-}
-
func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, containerID *refs.ContainerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, op string) error {
if vh == nil {
return errMissingVerificationHeader
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
index a6f0fb222..9eed469ca 100644
--- a/pkg/services/container/ape_test.go
+++ b/pkg/services/container/ape_test.go
@@ -9,7 +9,6 @@ import (
"net"
"testing"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
@@ -50,7 +49,6 @@ func TestAPE(t *testing.T) {
t.Run("deny get container by user claim tag", testDenyGetContainerByUserClaimTag)
t.Run("deny get container by IP", testDenyGetContainerByIP)
t.Run("deny get container by group id", testDenyGetContainerByGroupID)
- t.Run("deny set container eACL for IR", testDenySetContainerEACLForIR)
t.Run("deny get container eACL for IR with session token", testDenyGetContainerEACLForIRSessionToken)
t.Run("deny put container for others with session token", testDenyPutContainerForOthersSessionToken)
t.Run("deny put container, read namespace from frostfsID", testDenyPutContainerReadNamespaceFromFrostfsID)
@@ -665,84 +663,6 @@ func testDenyGetContainerByGroupID(t *testing.T) {
require.ErrorAs(t, err, &errAccessDenied)
}
-func testDenySetContainerEACLForIR(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{},
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodSetContainerEACL,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleIR,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := &container.SetExtendedACLRequest{}
- req.SetBody(&container.SetExtendedACLRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetEACL(&acl.Table{})
- req.GetBody().GetEACL().SetContainerID(&refContID)
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
- ir.keys = append(ir.keys, pk.PublicKey().Bytes())
-
- resp, err := apeSrv.SetExtendedACL(context.Background(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
-}
-
func testDenyGetContainerEACLForIRSessionToken(t *testing.T) {
t.Parallel()
srv := &srvStub{
@@ -1229,11 +1149,6 @@ type srvStub struct {
calls map[string]int
}
-func (s *srvStub) AnnounceUsedSpace(context.Context, *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) {
- s.calls["AnnounceUsedSpace"]++
- return &container.AnnounceUsedSpaceResponse{}, nil
-}
-
func (s *srvStub) Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error) {
s.calls["Delete"]++
return &container.DeleteResponse{}, nil
@@ -1259,11 +1174,6 @@ func (s *srvStub) Put(context.Context, *container.PutRequest) (*container.PutRes
return &container.PutResponse{}, nil
}
-func (s *srvStub) SetExtendedACL(context.Context, *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) {
- s.calls["SetExtendedACL"]++
- return &container.SetExtendedACLResponse{}, nil
-}
-
type irStub struct {
keys [][]byte
}
diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go
index 7ef432bb1..34fd5923f 100644
--- a/pkg/services/container/audit.go
+++ b/pkg/services/container/audit.go
@@ -6,7 +6,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
container_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -29,24 +28,6 @@ func NewAuditService(next Server, log *logger.Logger, enabled *atomic.Bool) Serv
}
}
-// AnnounceUsedSpace implements Server.
-func (a *auditService) AnnounceUsedSpace(ctx context.Context, req *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) {
- res, err := a.next.AnnounceUsedSpace(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
-
- var ids []*refs.ContainerID
- for _, v := range req.GetBody().GetAnnouncements() {
- ids = append(ids, v.GetContainerID())
- }
-
- audit.LogRequest(a.log, container_grpc.ContainerService_AnnounceUsedSpace_FullMethodName, req,
- audit.TargetFromRefs(ids, &cid.ID{}), err == nil)
-
- return res, err
-}
-
// Delete implements Server.
func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
res, err := a.next.Delete(ctx, req)
@@ -103,14 +84,3 @@ func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*con
audit.TargetFromRef(res.GetBody().GetContainerID(), &cid.ID{}), err == nil)
return res, err
}
-
-// SetExtendedACL implements Server.
-func (a *auditService) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) {
- res, err := a.next.SetExtendedACL(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
- audit.LogRequest(a.log, container_grpc.ContainerService_SetExtendedACL_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetEACL().GetContainerID(), &cid.ID{}), err == nil)
- return res, err
-}
diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go
index d4ae11d62..b64963e25 100644
--- a/pkg/services/container/executor.go
+++ b/pkg/services/container/executor.go
@@ -14,7 +14,6 @@ type ServiceExecutor interface {
Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error)
Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error)
List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error)
- SetExtendedACL(context.Context, *session.Token, *container.SetExtendedACLRequestBody) (*container.SetExtendedACLResponseBody, error)
GetExtendedACL(context.Context, *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error)
}
@@ -96,24 +95,6 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co
return resp, nil
}
-func (s *executorSvc) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) {
- meta := req.GetMetaHeader()
- for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
- meta = origin
- }
-
- respBody, err := s.exec.SetExtendedACL(ctx, meta.GetSessionToken(), req.GetBody())
- if err != nil {
- return nil, fmt.Errorf("could not execute SetEACL request: %w", err)
- }
-
- resp := new(container.SetExtendedACLResponse)
- resp.SetBody(respBody)
-
- s.respSvc.SetMeta(resp)
- return resp, nil
-}
-
func (s *executorSvc) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
respBody, err := s.exec.GetExtendedACL(ctx, req.GetBody())
if err != nil {
diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go
index e2e79f3d2..57dac32f0 100644
--- a/pkg/services/container/morph/executor.go
+++ b/pkg/services/container/morph/executor.go
@@ -13,8 +13,6 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
)
var errMissingUserID = errors.New("missing user ID")
@@ -204,10 +202,6 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody)
return res, nil
}
-func (s *morphExecutor) SetExtendedACL(_ context.Context, _ *sessionV2.Token, _ *container.SetExtendedACLRequestBody) (*container.SetExtendedACLResponseBody, error) {
- return nil, status.Errorf(codes.Unimplemented, "method SetExtendedACL not implemented")
-}
-
func (s *morphExecutor) GetExtendedACL(_ context.Context, body *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error) {
idV2 := body.GetContainerID()
if idV2 == nil {
diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go
index 052a8c945..d714d7f02 100644
--- a/pkg/services/container/server.go
+++ b/pkg/services/container/server.go
@@ -12,7 +12,5 @@ type Server interface {
Get(context.Context, *container.GetRequest) (*container.GetResponse, error)
Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error)
List(context.Context, *container.ListRequest) (*container.ListResponse, error)
- SetExtendedACL(context.Context, *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error)
GetExtendedACL(context.Context, *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error)
- AnnounceUsedSpace(context.Context, *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error)
}
diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go
index bba717f60..62aa3fe27 100644
--- a/pkg/services/container/sign.go
+++ b/pkg/services/container/sign.go
@@ -57,15 +57,6 @@ func (s *signService) List(ctx context.Context, req *container.ListRequest) (*co
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *signService) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) {
- if err := s.sigSvc.VerifyRequest(req); err != nil {
- resp := new(container.SetExtendedACLResponse)
- return resp, s.sigSvc.SignResponse(resp, err)
- }
- resp, err := util.EnsureNonNilResponse(s.svc.SetExtendedACL(ctx, req))
- return resp, s.sigSvc.SignResponse(resp, err)
-}
-
func (s *signService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
if err := s.sigSvc.VerifyRequest(req); err != nil {
resp := new(container.GetExtendedACLResponse)
@@ -74,12 +65,3 @@ func (s *signService) GetExtendedACL(ctx context.Context, req *container.GetExte
resp, err := util.EnsureNonNilResponse(s.svc.GetExtendedACL(ctx, req))
return resp, s.sigSvc.SignResponse(resp, err)
}
-
-func (s *signService) AnnounceUsedSpace(ctx context.Context, req *container.AnnounceUsedSpaceRequest) (*container.AnnounceUsedSpaceResponse, error) {
- if err := s.sigSvc.VerifyRequest(req); err != nil {
- resp := new(container.AnnounceUsedSpaceResponse)
- return resp, s.sigSvc.SignResponse(resp, err)
- }
- resp, err := util.EnsureNonNilResponse(s.svc.AnnounceUsedSpace(ctx, req))
- return resp, s.sigSvc.SignResponse(resp, err)
-}
From 85a77b7c21accc667f4f5cb50ea1a0bf8632cedb Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Mon, 29 Jul 2024 15:33:36 +0300
Subject: [PATCH 011/705] [#1279] adm: Interpret "root" name as empty for
namespace target type
Signed-off-by: Airat Arifullin
---
cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
index 5e17f4014..d4aedda2e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
+++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go
@@ -38,6 +38,12 @@ var (
func parseTarget(cmd *cobra.Command) policyengine.Target {
name, _ := cmd.Flags().GetString(targetNameFlag)
typ, err := parseTargetType(cmd)
+
+ // interpret "root" namespace as empty
+ if typ == policyengine.Namespace && name == "root" {
+ name = ""
+ }
+
commonCmd.ExitOnErr(cmd, "read target type error: %w", err)
return policyengine.Target{
From a12c39667d2b95f65697205b90f5d2e61d09132e Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 29 Jul 2024 11:31:05 +0300
Subject: [PATCH 012/705] [#1278] ir: Do not allow to create container without
FrostFSID record
Signed-off-by: Dmitrii Stepanov
---
pkg/innerring/processors/container/handlers_test.go | 3 +--
.../processors/container/process_container.go | 10 +++++-----
2 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go
index 1aac31ae3..dc1e919bb 100644
--- a/pkg/innerring/processors/container/handlers_test.go
+++ b/pkg/innerring/processors/container/handlers_test.go
@@ -3,7 +3,6 @@ package container
import (
"crypto/ecdsa"
"encoding/hex"
- "fmt"
"testing"
"time"
@@ -238,5 +237,5 @@ func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction)
type testFrostFSIDClient struct{}
func (c *testFrostFSIDClient) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) {
- return nil, fmt.Errorf("subject not found")
+ return &frostfsidclient.Subject{}, nil
}
diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go
index a950997fd..d89b63e82 100644
--- a/pkg/innerring/processors/container/process_container.go
+++ b/pkg/innerring/processors/container/process_container.go
@@ -180,11 +180,6 @@ func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Contain
}
}
- namespace, hasNamespace := strings.CutSuffix(ctx.d.Zone(), ".ns")
- if !hasNamespace {
- return nil
- }
-
addr, err := util.Uint160DecodeBytesBE(cnr.Owner().WalletBytes()[1 : 1+util.Uint160Size])
if err != nil {
return fmt.Errorf("could not get container owner address: %w", err)
@@ -195,6 +190,11 @@ func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Contain
return fmt.Errorf("could not get subject from FrostfsID contract: %w", err)
}
+ namespace, hasNamespace := strings.CutSuffix(ctx.d.Zone(), ".ns")
+ if !hasNamespace {
+ return nil
+ }
+
if subject.Namespace != namespace {
return errContainerAndOwnerNamespaceDontMatch
}
From 7e04083c273c7c50eebd76cde5d46d91fcc494a2 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 29 Jul 2024 13:03:55 +0300
Subject: [PATCH 013/705] [#1278] containerSvc: Validate FrostFSID subject
existence on Put
Signed-off-by: Dmitrii Stepanov
---
pkg/services/container/ape.go | 21 ++++++++++++++++++++-
pkg/services/container/ape_test.go | 15 ++++++++++-----
2 files changed, 30 insertions(+), 6 deletions(-)
diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go
index 8fe4dd2d9..6f8a8e0e6 100644
--- a/pkg/services/container/ape.go
+++ b/pkg/services/container/ape.go
@@ -211,7 +211,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
}
}
- namespace, err := ac.namespaceByOwner(req.GetBody().GetContainer().GetOwnerID())
+ namespace, err := ac.namespaceByKnownOwner(req.GetBody().GetContainer().GetOwnerID())
if err != nil {
return nil, fmt.Errorf("get namespace error: %w", err)
}
@@ -608,6 +608,25 @@ func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) {
return namespace, nil
}
+func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) {
+ var ownerSDK user.ID
+ if owner == nil {
+ return "", errOwnerIDIsNotSet
+ }
+ if err := ownerSDK.ReadFromV2(*owner); err != nil {
+ return "", err
+ }
+ addr, err := ownerSDK.ScriptHash()
+ if err != nil {
+ return "", err
+ }
+ subject, err := ac.frostFSIDClient.GetSubject(addr)
+ if err != nil {
+ return "", fmt.Errorf("get subject error: %w", err)
+ }
+ return subject.Namespace, nil
+}
+
// validateNamespace validates a namespace set in a container.
// If frostfs-id contract stores a namespace N1 for an owner ID and a container within a request
// is set with namespace N2 (via Zone() property), then N2 is invalid and the request is denied.
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
index 9eed469ca..68c1158a6 100644
--- a/pkg/services/container/ape_test.go
+++ b/pkg/services/container/ape_test.go
@@ -765,17 +765,22 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) {
keys: [][]byte{},
}
nm := &netmapStub{}
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{},
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
testContainer := containertest.Container()
+ owner := testContainer.Owner()
+ ownerAddr, err := owner.ScriptHash()
+ require.NoError(t, err)
+ frostfsIDSubjectReader := &frostfsidStub{
+ subjects: map[util.Uint160]*client.Subject{
+ ownerAddr: {},
+ },
+ }
+ apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
nm.currentEpoch = 100
nm.netmaps = map[uint64]*netmap.NetMap{}
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+ _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
Rules: []chain.Rule{
{
Status: chain.AccessDenied,
From c49982d22ac0f2f4314ba2d79ab835d93cc437c6 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 30 Jul 2024 15:41:57 +0300
Subject: [PATCH 014/705] [#1282] cli: Allow to use external addresses first for
`object nodes`
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/object/nodes.go | 23 +++++++++++++++++------
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go
index d04cf6f04..42ae7324e 100644
--- a/cmd/frostfs-cli/modules/object/nodes.go
+++ b/cmd/frostfs-cli/modules/object/nodes.go
@@ -30,7 +30,8 @@ import (
)
const (
- verifyPresenceAllFlag = "verify-presence-all"
+ verifyPresenceAllFlag = "verify-presence-all"
+ preferInternalAddressesFlag = "prefer-internal-addresses"
)
var (
@@ -97,6 +98,7 @@ func initObjectNodesCmd() {
flags.Bool(verifyPresenceAllFlag, false, "Verify the actual presence of the object on all netmap nodes.")
flags.Bool(commonflags.JSON, false, "Print information about the object placement as json.")
+ flags.Bool(preferInternalAddressesFlag, false, "Use internal addresses first to get object info.")
}
func objectNodes(cmd *cobra.Command, _ []string) {
@@ -449,11 +451,20 @@ func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap
func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.NodeInfo, pk *ecdsa.PrivateKey) (*client.Client, error) {
var cli *client.Client
var addresses []string
- candidate.IterateNetworkEndpoints(func(s string) bool {
- addresses = append(addresses, s)
- return false
- })
- addresses = append(addresses, candidate.ExternalAddresses()...)
+ if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal {
+ candidate.IterateNetworkEndpoints(func(s string) bool {
+ addresses = append(addresses, s)
+ return false
+ })
+ addresses = append(addresses, candidate.ExternalAddresses()...)
+ } else {
+ addresses = append(addresses, candidate.ExternalAddresses()...)
+ candidate.IterateNetworkEndpoints(func(s string) bool {
+ addresses = append(addresses, s)
+ return false
+ })
+ }
+
var lastErr error
for _, address := range addresses {
var networkAddr network.Address
From a55600893efeadfc48a83f3f2529d846f4d534c3 Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Fri, 2 Aug 2024 12:31:59 +0300
Subject: [PATCH 015/705] [#1266] Makefile: Specify gofumpt version
Add target to install gofumpt, fix target to run gofumpt.
Signed-off-by: Ekaterina Lebedeva
---
Makefile | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index c93d06aa8..11111d9a7 100755
--- a/Makefile
+++ b/Makefile
@@ -46,6 +46,10 @@ STATICCHECK_VERSION_DIR ?= $(STATICCHECK_DIR)/$(STATICCHECK_VERSION)
SOURCES = $(shell find . -type f -name "*.go" -print)
+GOFUMPT_VERSION ?= v0.6.0
+GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
+GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
+
GOPLS_VERSION ?= v0.15.1
GOPLS_DIR ?= $(abspath $(BIN))/gopls
GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
@@ -165,10 +169,19 @@ imports:
@echo "⇒ Processing goimports check"
@goimports -w cmd/ pkg/ misc/
+# Install gofumpt
+fumpt-install:
+ @rm -rf $(GOFUMPT_DIR)
+ @mkdir $(GOFUMPT_DIR)
+ @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
+
# Run gofumpt
fumpt:
+ @if [ ! -d "$(GOFUMPT_VERSION_DIR)" ]; then \
+ make fumpt-install; \
+ fi
@echo "⇒ Processing gofumpt check"
- @gofumpt -l -w cmd/ pkg/ misc/
+ $(GOFUMPT_VERSION_DIR)/gofumpt -l -w cmd/ pkg/ misc/
# Run Unit Test with go test
test: GOFLAGS ?= "-count=1"
From ef4cea6d19eb8a4d05e47647f00d874c892c17ea Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Fri, 2 Aug 2024 12:34:04 +0300
Subject: [PATCH 016/705] [#1266] .forgejo: Add gofumpt action
`gofumpt` was skipped by pre-commit on CI, and now is used
in a separate action.
Signed-off-by: Ekaterina Lebedeva
---
.forgejo/workflows/tests.yml | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml
index b0c9adbf2..5d64d7bc4 100644
--- a/.forgejo/workflows/tests.yml
+++ b/.forgejo/workflows/tests.yml
@@ -89,3 +89,21 @@ jobs:
- name: Run gopls
run: make gopls-run
+
+ fumpt:
+ name: Run gofumpt
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Set up Go
+ uses: actions/setup-go@v3
+ with:
+ go-version: '1.22'
+ cache: true
+
+ - name: Install gofumpt
+ run: make fumpt-install
+
+ - name: Run gofumpt
+ run: make fumpt
From 8021bacc43c1ae065033bfefa163d802b9eaeb33 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 1 Aug 2024 16:08:50 +0300
Subject: [PATCH 017/705] [#1288] putSvc: Respect TTL for EC put
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/ec.go | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/put/ec.go
index 6da50195e..fbb51912c 100644
--- a/pkg/services/object/put/ec.go
+++ b/pkg/services/object/put/ec.go
@@ -17,6 +17,7 @@ import (
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
@@ -39,7 +40,7 @@ type ecWriter struct {
}
func (e *ecWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
- relayed, err := e.relayIfNotContainerNode(ctx)
+ relayed, err := e.relayIfNotContainerNode(ctx, obj)
if err != nil {
return err
}
@@ -65,7 +66,7 @@ func (e *ecWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error
return e.writeRawObject(ctx, obj)
}
-func (e *ecWriter) relayIfNotContainerNode(ctx context.Context) (bool, error) {
+func (e *ecWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, error) {
if e.relay == nil {
return false, nil
}
@@ -77,7 +78,13 @@ func (e *ecWriter) relayIfNotContainerNode(ctx context.Context) (bool, error) {
// object can be splitted or saved local
return false, nil
}
- if err := e.relayToContainerNode(ctx); err != nil {
+ objID := object.AddressOf(obj).Object()
+ var index uint32
+ if obj.ECHeader() != nil {
+ objID = obj.ECHeader().Parent()
+ index = obj.ECHeader().Index()
+ }
+ if err := e.relayToContainerNode(ctx, objID, index); err != nil {
return false, err
}
return true, nil
@@ -102,18 +109,20 @@ func (e *ecWriter) currentNodeIsContainerNode() (bool, error) {
return false, nil
}
-func (e *ecWriter) relayToContainerNode(ctx context.Context) error {
- t, err := placement.NewTraverser(e.placementOpts...)
+func (e *ecWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error {
+ t, err := placement.NewTraverser(append(e.placementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
var lastErr error
+ offset := int(index)
for {
nodes := t.Next()
if len(nodes) == 0 {
break
}
- for _, node := range nodes {
+ for idx := range nodes {
+ node := nodes[(idx+offset)%len(nodes)]
var info client.NodeInfo
client.NodeInfoFromNetmapElement(&info, node)
@@ -149,6 +158,10 @@ func (e *ecWriter) relayToContainerNode(ctx context.Context) error {
}
func (e *ecWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error {
+ if e.commonPrm.LocalOnly() {
+ return e.writePartLocal(ctx, obj)
+ }
+
t, err := placement.NewTraverser(append(e.placementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
if err != nil {
return err
From dc3dcabadca1d1bfb20dab5947ec4c22f3d2362b Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 2 Aug 2024 17:50:49 +0300
Subject: [PATCH 018/705] [#1291] morph: Reconnect to the highest priority
endpoint
Signed-off-by: Dmitrii Stepanov
---
pkg/morph/client/constructor.go | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go
index 648c7d3c0..78cb3e82f 100644
--- a/pkg/morph/client/constructor.go
+++ b/pkg/morph/client/constructor.go
@@ -148,6 +148,10 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
} else {
cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint,
zap.String("endpoint", endpoint.Address))
+ if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 {
+ cli.switchIsActive.Store(true)
+ go cli.switchToMostPrioritized(ctx)
+ }
break
}
}
From 327d364f34de730879f330ea51d8801f5c6bddc9 Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Fri, 2 Aug 2024 16:20:51 +0300
Subject: [PATCH 019/705] [#1262] sdnotify: Get rid of go:linkname for nanotime
Signed-off-by: Ekaterina Lebedeva
---
pkg/util/sdnotify/clock.go | 10 ----------
pkg/util/sdnotify/clock.s | 2 --
pkg/util/sdnotify/sdnotify.go | 4 +++-
3 files changed, 3 insertions(+), 13 deletions(-)
delete mode 100644 pkg/util/sdnotify/clock.go
delete mode 100644 pkg/util/sdnotify/clock.s
diff --git a/pkg/util/sdnotify/clock.go b/pkg/util/sdnotify/clock.go
deleted file mode 100644
index f5419d027..000000000
--- a/pkg/util/sdnotify/clock.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package sdnotify
-
-import (
- // For go:linkname to work.
- _ "unsafe"
-)
-
-//go:noescape
-//go:linkname nanotime runtime.nanotime
-func nanotime() int64
diff --git a/pkg/util/sdnotify/clock.s b/pkg/util/sdnotify/clock.s
deleted file mode 100644
index ad033ff4f..000000000
--- a/pkg/util/sdnotify/clock.s
+++ /dev/null
@@ -1,2 +0,0 @@
-// The file is intentionally empty.
-// It is a workaround for https://github.com/golang/go/issues/15006
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
index 16a3f11c1..a3af50b22 100644
--- a/pkg/util/sdnotify/sdnotify.go
+++ b/pkg/util/sdnotify/sdnotify.go
@@ -6,6 +6,7 @@ import (
"net"
"os"
"strings"
+ "time"
)
const (
@@ -16,6 +17,7 @@ const (
var (
socket *net.UnixAddr
+ start = time.Now()
errSocketVariableIsNotPresent = errors.New("\"NOTIFY_SOCKET\" environment variable is not present")
errSocketIsNotInitialized = errors.New("socket is not initialized")
@@ -51,7 +53,7 @@ func FlagAndStatus(status string) error {
// must be sent, containing "READY=1".
//
// For MONOTONIC_USEC format refer to https://www.man7.org/linux/man-pages/man3/sd_notify.3.html
- status += fmt.Sprintf("\nMONOTONIC_USEC=%d", uint64(nanotime())/1e3 /* microseconds in nanoseconds */)
+ status += fmt.Sprintf("\nMONOTONIC_USEC=%d", uint64(time.Since(start))/1e3 /* convert nanoseconds to microseconds */)
}
status += "\nSTATUS=" + strings.TrimSuffix(status, "=1")
return Send(status)
From eeca796d2e5f60b0331e05e29735d7651e9794ae Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 5 Aug 2024 13:24:48 +0300
Subject: [PATCH 020/705] [#1295] engine: Log object address in case of error
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/delete.go | 6 +++---
pkg/local_object_storage/engine/exists.go | 3 ++-
pkg/local_object_storage/engine/get.go | 2 +-
pkg/local_object_storage/engine/head.go | 3 ++-
pkg/local_object_storage/engine/inhume.go | 8 ++++----
pkg/local_object_storage/engine/lock.go | 13 +++++++++----
pkg/local_object_storage/engine/put.go | 2 +-
pkg/local_object_storage/engine/range.go | 2 +-
8 files changed, 23 insertions(+), 16 deletions(-)
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 096528967..318f938fb 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -100,7 +100,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e
return false
} else {
if !client.IsErrObjectNotFound(err) {
- e.reportShardError(sh, "could not check object existence", err)
+ e.reportShardError(sh, "could not check object existence", err, zap.Stringer("address", prm.addr))
}
return false
}
@@ -116,7 +116,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e
_, err = sh.Inhume(ctx, shPrm)
if err != nil {
- e.reportShardError(sh, "could not inhume object in shard", err)
+ e.reportShardError(sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr))
var target *apistatus.ObjectLocked
locked.is = errors.As(err, &target)
@@ -191,7 +191,7 @@ func (e *StorageEngine) deleteChunks(
var objID oid.ID
err := objID.ReadFromV2(chunk.ID)
if err != nil {
- e.reportShardError(sh, "could not delete EC chunk", err)
+ e.reportShardError(sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr))
}
addr.SetObject(objID)
inhumePrm.MarkAsGarbage(addr)
diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go
index c57f79691..d98101306 100644
--- a/pkg/local_object_storage/engine/exists.go
+++ b/pkg/local_object_storage/engine/exists.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.uber.org/zap"
)
// exists return in the first value true if object exists.
@@ -36,7 +37,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool
}
if !client.IsErrObjectNotFound(err) {
- e.reportShardError(sh, "could not check existence of object in shard", err)
+ e.reportShardError(sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address))
}
return false
}
diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go
index 991af3d1a..253256c34 100644
--- a/pkg/local_object_storage/engine/get.go
+++ b/pkg/local_object_storage/engine/get.go
@@ -186,7 +186,7 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) {
i.ObjectExpired = true
return true
default:
- i.Engine.reportShardError(sh, "could not get object from shard", err)
+ i.Engine.reportShardError(sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
return false
}
})
diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go
index 92d1b20fc..dfe5e48a1 100644
--- a/pkg/local_object_storage/engine/head.go
+++ b/pkg/local_object_storage/engine/head.go
@@ -12,6 +12,7 @@ import (
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
)
// HeadPrm groups the parameters of Head operation.
@@ -118,7 +119,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error)
outError = new(apistatus.ObjectNotFound)
return true
default:
- e.reportShardError(sh, "could not head object from shard", err)
+ e.reportShardError(sh, "could not head object from shard", err, zap.Stringer("address", prm.addr))
return false
}
}
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index 991305af0..683713f94 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -154,7 +154,7 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh
var siErr *objectSDK.SplitInfoError
var ecErr *objectSDK.ECInfoError
if !(errors.As(err, &siErr) || errors.As(err, &ecErr)) {
- e.reportShardError(sh, "could not check for presents in shard", err)
+ e.reportShardError(sh, "could not check for presents in shard", err, zap.Stringer("address", addr))
return
}
@@ -179,7 +179,7 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh
return true
}
- e.reportShardError(sh, "could not inhume object in shard", err)
+ e.reportShardError(sh, "could not inhume object in shard", err, zap.Stringer("address", addr))
return false
}
@@ -205,7 +205,7 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e
e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
locked, err = h.Shard.IsLocked(ctx, addr)
if err != nil {
- e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("addr", addr),
+ e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("address", addr),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
outErr = err
return false
@@ -235,7 +235,7 @@ func (e *StorageEngine) GetLocked(ctx context.Context, addr oid.Address) ([]oid.
e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
ld, err := h.Shard.GetLocked(ctx, addr)
if err != nil {
- e.reportShardError(h, logs.EngineInterruptGettingLockers, err, zap.Stringer("addr", addr),
+ e.reportShardError(h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
outErr = err
}
diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go
index 5354c205f..3a41a7848 100644
--- a/pkg/local_object_storage/engine/lock.go
+++ b/pkg/local_object_storage/engine/lock.go
@@ -13,6 +13,7 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
)
var errLockFailed = errors.New("lock operation failed")
@@ -90,14 +91,16 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
var objID oid.ID
err = objID.ReadFromV2(chunk.ID)
if err != nil {
- e.reportShardError(sh, "could not lock object in shard", err)
+ e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
return false
}
eclocked = append(eclocked, objID)
}
err = sh.Lock(ctx, idCnr, locker, eclocked)
if err != nil {
- e.reportShardError(sh, "could not lock object in shard", err)
+ e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
return false
}
root = true
@@ -109,7 +112,8 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
return true
}
- e.reportShardError(sh, "could not check locked object for presence in shard", err)
+ e.reportShardError(sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr),
+ zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
return
}
@@ -121,7 +125,8 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked})
if err != nil {
- e.reportShardError(sh, "could not lock object in shard", err)
+ e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
+ zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
var errIrregular *apistatus.LockNonRegularObject
if errors.As(err, &errIrregular) {
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index 54385910b..f92d83745 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -187,7 +187,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
return
}
- e.reportShardError(sh, "could not put object to shard", err)
+ e.reportShardError(sh, "could not put object to shard", err, zap.Stringer("address", addr))
return
}
diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go
index f5b33a251..cbf26ff4e 100644
--- a/pkg/local_object_storage/engine/range.go
+++ b/pkg/local_object_storage/engine/range.go
@@ -208,7 +208,7 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) {
return true // stop, return it back
default:
- i.Engine.reportShardError(sh, "could not get object from shard", err)
+ i.Engine.reportShardError(sh, "could not get object from shard", err, zap.Stringer("address", i.Address))
return false
}
})
From 10602b55b1974512b5cac603b87ae6f1b2fe84b5 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 5 Aug 2024 16:13:43 +0300
Subject: [PATCH 021/705] [#1295] engine: Resolve funlen linter
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/lock.go | 8 --------
1 file changed, 8 deletions(-)
diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go
index 3a41a7848..ac8fa9c6f 100644
--- a/pkg/local_object_storage/engine/lock.go
+++ b/pkg/local_object_storage/engine/lock.go
@@ -63,11 +63,9 @@ func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l
func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) {
// code is pretty similar to inhumeAddr, maybe unify?
root := false
-
var addrLocked oid.Address
addrLocked.SetContainer(idCnr)
addrLocked.SetObject(locked)
-
e.iterateOverSortedShards(addrLocked, func(_ int, sh hashedShard) (stop bool) {
defer func() {
// if object is root we continue since information about it
@@ -80,7 +78,6 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
if checkExists {
var existsPrm shard.ExistsPrm
existsPrm.Address = addrLocked
-
exRes, err := sh.Exists(ctx, existsPrm)
if err != nil {
var siErr *objectSDK.SplitInfoError
@@ -111,7 +108,6 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
// do not lock it
return true
}
-
e.reportShardError(sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr),
zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
return
@@ -133,14 +129,10 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
status = 1
return true
}
-
return false
}
-
status = 2
-
return true
})
-
return
}
From 8e51d7849a41c1135695650d9a748d0ba29a9dfb Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 6 Aug 2024 09:32:40 +0300
Subject: [PATCH 022/705] [#1295] getSvc: Assemble complex EC object headers
without linking object
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/head.go | 15 ++++-----
pkg/local_object_storage/metabase/get.go | 22 +++++++++---
pkg/services/object/get/assemble.go | 4 +--
pkg/services/object/get/assembler.go | 43 ++++++++++++++++++++++++
pkg/services/object/get/get_test.go | 2 +-
pkg/services/object/get/request.go | 4 +--
6 files changed, 72 insertions(+), 18 deletions(-)
diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go
index dfe5e48a1..6857a3631 100644
--- a/pkg/local_object_storage/engine/head.go
+++ b/pkg/local_object_storage/engine/head.go
@@ -127,17 +127,16 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error)
return true
})
+ if head != nil {
+ return HeadRes{head: head}, nil
+ }
if outSI != nil {
return HeadRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(outSI))
- } else if outEI != nil {
- return HeadRes{}, logicerr.Wrap(objectSDK.NewECInfoError(outEI))
- } else if head == nil {
- return HeadRes{}, outError
}
-
- return HeadRes{
- head: head,
- }, nil
+ if outEI != nil {
+ return HeadRes{}, logicerr.Wrap(objectSDK.NewECInfoError(outEI))
+ }
+ return HeadRes{}, outError
}
// Head reads object header from local storage by provided address.
diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go
index d9acd4ce2..b79f6cb14 100644
--- a/pkg/local_object_storage/metabase/get.go
+++ b/pkg/local_object_storage/metabase/get.go
@@ -160,11 +160,23 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
- // pick last item, for now there is not difference which address to pick
- // but later list might be sorted so first or last value can be more
- // prioritized to choose
- virtualOID := relativeLst[len(relativeLst)-1]
- data := getFromBucket(tx, primaryBucketName(cnr, bucketName), virtualOID)
+ var data []byte
+ for i := 0; i < len(relativeLst) && len(data) == 0; i++ {
+ virtualOID := relativeLst[len(relativeLst)-i-1]
+ data = getFromBucket(tx, primaryBucketName(cnr, bucketName), virtualOID)
+ }
+
+ if len(data) == 0 {
+ // check if any of the relatives is an EC object
+ for _, relative := range relativeLst {
+ data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), relative)
+ if len(data) > 0 {
+ // we can't return the object header, but we can return an error,
+ // so that the assembler can try to assemble the complex object
+ return nil, getSplitInfoError(tx, cnr, key)
+ }
+ }
+ }
child := objectSDK.New()
diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go
index ba6fddec5..9f17f1e4c 100644
--- a/pkg/services/object/get/assemble.go
+++ b/pkg/services/object/get/assemble.go
@@ -12,7 +12,7 @@ import (
)
func (r *request) assemble(ctx context.Context) {
- if !r.canAssemble() {
+ if !r.canAssembleComplexObject() {
r.log.Debug(logs.GetCanNotAssembleTheObject)
return
}
@@ -38,7 +38,7 @@ func (r *request) assemble(ctx context.Context) {
r.log.Debug(logs.GetTryingToAssembleTheObject)
r.prm.common = r.prm.common.WithLocalOnly(false)
- assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r)
+ assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r, r.headOnly())
r.log.Debug(logs.GetAssemblingSplittedObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go
index 025296ec7..ff3f90bf2 100644
--- a/pkg/services/object/get/assembler.go
+++ b/pkg/services/object/get/assembler.go
@@ -19,6 +19,7 @@ type assembler struct {
splitInfo *objectSDK.SplitInfo
rng *objectSDK.Range
objGetter objectGetter
+ head bool
currentOffset uint64
@@ -30,18 +31,23 @@ func newAssembler(
splitInfo *objectSDK.SplitInfo,
rng *objectSDK.Range,
objGetter objectGetter,
+ head bool,
) *assembler {
return &assembler{
addr: addr,
rng: rng,
splitInfo: splitInfo,
objGetter: objGetter,
+ head: head,
}
}
// Assemble assembles splitted large object and writes it's content to ObjectWriter.
// It returns parent object.
func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ if a.head {
+ return a.assembleHeader(ctx, writer)
+ }
sourceObjectID, ok := a.getLastPartOrLinkObjectID()
if !ok {
return nil, objectSDK.NewSplitInfoError(a.splitInfo)
@@ -65,6 +71,43 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS
return a.parentObject, nil
}
+func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) {
+ var sourceObjectIDs []oid.ID
+ sourceObjectID, ok := a.splitInfo.Link()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ sourceObjectID, ok = a.splitInfo.LastPart()
+ if ok {
+ sourceObjectIDs = append(sourceObjectIDs, sourceObjectID)
+ }
+ if len(sourceObjectIDs) == 0 {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ for _, sourceObjectID = range sourceObjectIDs {
+ obj, err := a.getParent(ctx, sourceObjectID, writer)
+ if err == nil {
+ return obj, nil
+ }
+ }
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+}
+
+func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) {
+ obj, err := a.objGetter.HeadObject(ctx, sourceObjectID)
+ if err != nil {
+ return nil, err
+ }
+ parent := obj.Parent()
+ if parent == nil {
+ return nil, objectSDK.NewSplitInfoError(a.splitInfo)
+ }
+ if err := writer.WriteHeader(ctx, parent); err != nil {
+ return nil, err
+ }
+ return obj, nil
+}
+
func (a *assembler) getLastPartOrLinkObjectID() (oid.ID, bool) {
sourceObjectID, ok := a.splitInfo.Link()
if ok {
diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go
index 29a15ba78..1fc6b7b20 100644
--- a/pkg/services/object/get/get_test.go
+++ b/pkg/services/object/get/get_test.go
@@ -730,7 +730,7 @@ func TestGetRemoteSmall(t *testing.T) {
t.Run("VIRTUAL", func(t *testing.T) {
testHeadVirtual := func(svc *Service, addr oid.Address, i *objectSDK.SplitInfo) {
- headPrm := newHeadPrm(false, nil)
+ headPrm := newHeadPrm(true, nil)
headPrm.WithAddress(addr)
errSplit := objectSDK.NewSplitInfoError(objectSDK.NewSplitInfo())
diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go
index 9ddfeddf2..1a7a43a35 100644
--- a/pkg/services/object/get/request.go
+++ b/pkg/services/object/get/request.go
@@ -88,8 +88,8 @@ func (r *request) key() (*ecdsa.PrivateKey, error) {
return r.keyStore.GetKey(sessionInfo)
}
-func (r *request) canAssemble() bool {
- return !r.isRaw() && !r.headOnly()
+func (r *request) canAssembleComplexObject() bool {
+ return !r.isRaw()
}
func (r *request) splitInfo() *objectSDK.SplitInfo {
From 5c01bd5be89b40f2bd5edd4526b474644e053f11 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 6 Aug 2024 13:20:33 +0300
Subject: [PATCH 023/705] [#1298] writecache: Add `restore-mode` flag for Seal
command
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/control/writecache.go | 5 +
pkg/local_object_storage/engine/writecache.go | 4 +-
pkg/local_object_storage/shard/writecache.go | 5 +-
pkg/local_object_storage/writecache/seal.go | 19 +-
.../writecache/writecache.go | 7 +-
pkg/services/control/ir/service_grpc.pb.go | 6 +-
.../control/server/seal_writecache.go | 1 +
pkg/services/control/service.pb.go | 347 +++++++++---------
pkg/services/control/service.proto | 3 +
pkg/services/control/service_frostfs.pb.go | 2 +
pkg/services/tree/service_grpc.pb.go | 44 +--
11 files changed, 238 insertions(+), 205 deletions(-)
diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go
index abc4ed2e6..a665ccae8 100644
--- a/cmd/frostfs-cli/modules/control/writecache.go
+++ b/cmd/frostfs-cli/modules/control/writecache.go
@@ -9,6 +9,8 @@ import (
"github.com/spf13/cobra"
)
+const restoreModeFlag = "restore-mode"
+
var writecacheShardCmd = &cobra.Command{
Use: "writecache",
Short: "Operations with storage node's write-cache",
@@ -26,10 +28,12 @@ func sealWritecache(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
+ restoreMode, _ := cmd.Flags().GetBool(restoreModeFlag)
req := &control.SealWriteCacheRequest{Body: &control.SealWriteCacheRequest_Body{
Shard_ID: getShardIDList(cmd),
IgnoreErrors: ignoreErrors,
+ RestoreMode: restoreMode,
}}
signRequest(cmd, pk, req)
@@ -68,6 +72,7 @@ func initControlShardsWritecacheCmd() {
ff.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
ff.Bool(shardAllFlag, false, "Process all shards")
ff.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects")
+ ff.Bool(restoreModeFlag, false, "Restore writecache's mode after sealing")
sealWritecacheShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go
index da488260a..8f37d7860 100644
--- a/pkg/local_object_storage/engine/writecache.go
+++ b/pkg/local_object_storage/engine/writecache.go
@@ -70,6 +70,7 @@ func (e *StorageEngine) FlushWriteCache(ctx context.Context, p FlushWriteCachePr
type SealWriteCachePrm struct {
ShardIDs []*shard.ID
IgnoreErrors bool
+ RestoreMode bool
}
type ShardSealResult struct {
@@ -88,6 +89,7 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr
trace.WithAttributes(
attribute.Int("shard_id_count", len(prm.ShardIDs)),
attribute.Bool("ignore_errors", prm.IgnoreErrors),
+ attribute.Bool("restore_mode", prm.RestoreMode),
))
defer span.End()
@@ -114,7 +116,7 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr
return nil
}
- err := sh.SealWriteCache(egCtx, shard.SealWriteCachePrm{IgnoreErrors: prm.IgnoreErrors})
+ err := sh.SealWriteCache(egCtx, shard.SealWriteCachePrm{IgnoreErrors: prm.IgnoreErrors, RestoreMode: prm.RestoreMode})
resGuard.Lock()
defer resGuard.Unlock()
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index 05e014d29..9edad7170 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -59,6 +60,7 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error
type SealWriteCachePrm struct {
IgnoreErrors bool
+ RestoreMode bool
}
// SealWriteCache flushes all data from the write-cache and moves it to degraded read only mode.
@@ -67,6 +69,7 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
attribute.Bool("ignore_errors", p.IgnoreErrors),
+ attribute.Bool("restore_mode", p.RestoreMode),
))
defer span.End()
@@ -84,5 +87,5 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
return ErrDegradedMode
}
- return s.writeCache.Seal(ctx, p.IgnoreErrors)
+ return s.writeCache.Seal(ctx, writecache.SealPrm{IgnoreErrors: p.IgnoreErrors, RestoreMode: p.RestoreMode})
}
diff --git a/pkg/local_object_storage/writecache/seal.go b/pkg/local_object_storage/writecache/seal.go
index 48107a75f..22b4e0988 100644
--- a/pkg/local_object_storage/writecache/seal.go
+++ b/pkg/local_object_storage/writecache/seal.go
@@ -9,20 +9,29 @@ import (
"go.opentelemetry.io/otel/trace"
)
-func (c *cache) Seal(ctx context.Context, ignoreErrors bool) error {
+func (c *cache) Seal(ctx context.Context, prm SealPrm) error {
ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Seal",
trace.WithAttributes(
- attribute.Bool("ignore_errors", ignoreErrors),
+ attribute.Bool("ignore_errors", prm.IgnoreErrors),
+ attribute.Bool("restore_mode", prm.RestoreMode),
))
defer span.End()
c.modeMtx.Lock()
defer c.modeMtx.Unlock()
+ sourceMode := c.mode
// flush will be done by setMode
- err := c.setMode(ctx, mode.DegradedReadOnly, ignoreErrors)
- if err == nil {
- c.metrics.SetMode(mode.ComponentDisabled)
+ err := c.setMode(ctx, mode.DegradedReadOnly, prm.IgnoreErrors)
+ if err != nil {
+ return err
+ }
+ c.metrics.SetMode(mode.ComponentDisabled)
+ if prm.RestoreMode {
+ err = c.setMode(ctx, sourceMode, prm.IgnoreErrors)
+ if err == nil {
+ c.metrics.SetMode(mode.ConvertToComponentMode(sourceMode))
+ }
}
return err
}
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
index 71dba61cf..7085a57b2 100644
--- a/pkg/local_object_storage/writecache/writecache.go
+++ b/pkg/local_object_storage/writecache/writecache.go
@@ -20,6 +20,11 @@ type Info struct {
Path string
}
+type SealPrm struct {
+ IgnoreErrors bool
+ RestoreMode bool
+}
+
// Cache represents write-cache for objects.
type Cache interface {
Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error)
@@ -36,7 +41,7 @@ type Cache interface {
SetLogger(*logger.Logger)
DumpInfo() Info
Flush(context.Context, bool, bool) error
- Seal(context.Context, bool) error
+ Seal(context.Context, SealPrm) error
Init() error
Open(ctx context.Context, mode mode.Mode) error
diff --git a/pkg/services/control/ir/service_grpc.pb.go b/pkg/services/control/ir/service_grpc.pb.go
index 724149c44..336bf5f70 100644
--- a/pkg/services/control/ir/service_grpc.pb.go
+++ b/pkg/services/control/ir/service_grpc.pb.go
@@ -35,7 +35,8 @@ type ControlServiceClient interface {
TickEpoch(ctx context.Context, in *TickEpochRequest, opts ...grpc.CallOption) (*TickEpochResponse, error)
// Forces a node removal to be signaled by the IR node with high probability.
RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error)
- // Forces a container removal to be signaled by the IR node with high probability.
+ // Forces a container removal to be signaled by the IR node with high
+ // probability.
RemoveContainer(ctx context.Context, in *RemoveContainerRequest, opts ...grpc.CallOption) (*RemoveContainerResponse, error)
}
@@ -93,7 +94,8 @@ type ControlServiceServer interface {
TickEpoch(context.Context, *TickEpochRequest) (*TickEpochResponse, error)
// Forces a node removal to be signaled by the IR node with high probability.
RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error)
- // Forces a container removal to be signaled by the IR node with high probability.
+ // Forces a container removal to be signaled by the IR node with high
+ // probability.
RemoveContainer(context.Context, *RemoveContainerRequest) (*RemoveContainerResponse, error)
}
diff --git a/pkg/services/control/server/seal_writecache.go b/pkg/services/control/server/seal_writecache.go
index e3f8b8caf..b663cfc81 100644
--- a/pkg/services/control/server/seal_writecache.go
+++ b/pkg/services/control/server/seal_writecache.go
@@ -19,6 +19,7 @@ func (s *Server) SealWriteCache(ctx context.Context, req *control.SealWriteCache
prm := engine.SealWriteCachePrm{
ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()),
IgnoreErrors: req.GetBody().GetIgnoreErrors(),
+ RestoreMode: req.GetBody().GetRestoreMode(),
}
res, err := s.s.SealWriteCache(ctx, prm)
diff --git a/pkg/services/control/service.pb.go b/pkg/services/control/service.pb.go
index 727dd1218..895b74368 100644
--- a/pkg/services/control/service.pb.go
+++ b/pkg/services/control/service.pb.go
@@ -4727,6 +4727,8 @@ type SealWriteCacheRequest_Body struct {
Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
// Flag indicating whether object read errors should be ignored.
IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
+ // If true, then writecache will be sealed, but mode will be restored to the current one.
+ RestoreMode bool `protobuf:"varint,4,opt,name=restore_mode,json=restoreMode,proto3" json:"restore_mode,omitempty"`
}
func (x *SealWriteCacheRequest_Body) Reset() {
@@ -4775,6 +4777,13 @@ func (x *SealWriteCacheRequest_Body) GetIgnoreErrors() bool {
return false
}
+func (x *SealWriteCacheRequest_Body) GetRestoreMode() bool {
+ if x != nil {
+ return x.RestoreMode
+ }
+ return false
+}
+
type SealWriteCacheResponse_Body struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -5482,7 +5491,7 @@ var file_pkg_services_control_service_proto_rawDesc = []byte{
0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xca, 0x01, 0x0a, 0x15,
+ 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xed, 0x01, 0x0a, 0x15,
0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
@@ -5491,182 +5500,184 @@ var file_pkg_services_control_service_proto_rawDesc = []byte{
0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x1a, 0x46, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72,
+ 0x1a, 0x69, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72,
0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72,
0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f,
- 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0xa9, 0x02, 0x0a, 0x16, 0x53, 0x65, 0x61,
- 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c,
- 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0xa2, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63,
- 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a,
- 0x53, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x14,
- 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a,
- 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52,
- 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x88, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x74,
+ 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x74,
+ 0x6f, 0x72, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
+ 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0xa9, 0x02, 0x0a, 0x16,
+ 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
+ 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x1a, 0xa2, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x07, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65,
+ 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x73, 0x1a, 0x53, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x74, 0x61,
+ 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79,
+ 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
+ 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x88, 0x01, 0x0a, 0x14,
+ 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74,
0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42,
- 0x6f, 0x64, 0x79, 0x32, 0x8b, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
- 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74,
- 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b,
- 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x63, 0x6f,
+ 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
+ 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06,
+ 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32, 0x8b, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x47, 0x65, 0x74,
+ 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61,
+ 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x48, 0x0a, 0x0b, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f,
0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c,
- 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a,
- 0x0c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f,
- 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x79,
- 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1f, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e,
- 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
- 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x4e, 0x0a, 0x0d, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x12, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63,
- 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75,
- 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x63, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76,
- 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61,
- 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73,
+ 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69,
+ 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x4b, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65,
+ 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a,
+ 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65,
+ 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68,
+ 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63,
+ 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45,
+ 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76,
+ 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72,
+ 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47,
+ 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x12, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76,
- 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52,
- 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a,
- 0x13, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61,
- 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x45, 0x0a, 0x0a, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1a, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63,
- 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72,
- 0x12, 0x16, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f,
- 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x66, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63,
- 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63,
- 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x1a, 0x52, 0x65, 0x73,
+ 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63,
+ 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65,
+ 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x60, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61,
+ 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65,
+ 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68,
+ 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f, 0x63,
+ 0x74, 0x6f, 0x72, 0x12, 0x16, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f,
+ 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e,
+ 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e,
+ 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41,
+ 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
+ 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15,
+ 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
+ 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
+ 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69,
+ 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12,
+ 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68,
+ 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69,
+ 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43,
0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x47, 0x65, 0x74,
- 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69,
- 0x64, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74,
- 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69,
- 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61,
- 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x6c, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f,
- 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x27, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76,
- 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x6f, 0x0a, 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f,
- 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69,
- 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
- 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x8a, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79,
- 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61,
- 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68,
+ 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
+ 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c,
0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54,
- 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a,
- 0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61,
- 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
- 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61,
- 0x63, 0x68, 0x65, 0x12, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
- 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
- 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44,
- 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74,
- 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73,
- 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c,
- 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
- 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61,
+ 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73,
+ 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x72, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x29, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x63,
+ 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73,
+ 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f,
+ 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 2cd8434fc..a10410025 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -655,6 +655,9 @@ message SealWriteCacheRequest {
// Flag indicating whether object read errors should be ignored.
bool ignore_errors = 2;
+
+ // If true, then writecache will be sealed, but mode will be restored to the current one.
+ bool restore_mode = 4;
}
Body body = 1;
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index a287606fa..b6b064973 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -3422,6 +3422,7 @@ func (x *SealWriteCacheRequest_Body) StableSize() (size int) {
}
size += proto.RepeatedBytesSize(1, x.Shard_ID)
size += proto.BoolSize(2, x.IgnoreErrors)
+ size += proto.BoolSize(4, x.RestoreMode)
return size
}
@@ -3443,6 +3444,7 @@ func (x *SealWriteCacheRequest_Body) StableMarshal(buf []byte) []byte {
var offset int
offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors)
+ offset += proto.BoolMarshal(4, buf[offset:], x.RestoreMode)
return buf
}
diff --git a/pkg/services/tree/service_grpc.pb.go b/pkg/services/tree/service_grpc.pb.go
index 4c293a4c0..63f96e11a 100644
--- a/pkg/services/tree/service_grpc.pb.go
+++ b/pkg/services/tree/service_grpc.pb.go
@@ -3,7 +3,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.4.0
+// - protoc-gen-go-grpc v1.3.0
// - protoc v4.25.0
// source: pkg/services/tree/service.proto
@@ -18,8 +18,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.62.0 or later.
-const _ = grpc.SupportPackageIsVersion8
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
const (
TreeService_Add_FullMethodName = "/tree.TreeService/Add"
@@ -70,9 +70,8 @@ func NewTreeServiceClient(cc grpc.ClientConnInterface) TreeServiceClient {
}
func (c *treeServiceClient) Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*AddResponse, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(AddResponse)
- err := c.cc.Invoke(ctx, TreeService_Add_FullMethodName, in, out, cOpts...)
+ err := c.cc.Invoke(ctx, TreeService_Add_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -80,9 +79,8 @@ func (c *treeServiceClient) Add(ctx context.Context, in *AddRequest, opts ...grp
}
func (c *treeServiceClient) AddByPath(ctx context.Context, in *AddByPathRequest, opts ...grpc.CallOption) (*AddByPathResponse, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(AddByPathResponse)
- err := c.cc.Invoke(ctx, TreeService_AddByPath_FullMethodName, in, out, cOpts...)
+ err := c.cc.Invoke(ctx, TreeService_AddByPath_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -90,9 +88,8 @@ func (c *treeServiceClient) AddByPath(ctx context.Context, in *AddByPathRequest,
}
func (c *treeServiceClient) Remove(ctx context.Context, in *RemoveRequest, opts ...grpc.CallOption) (*RemoveResponse, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RemoveResponse)
- err := c.cc.Invoke(ctx, TreeService_Remove_FullMethodName, in, out, cOpts...)
+ err := c.cc.Invoke(ctx, TreeService_Remove_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -100,9 +97,8 @@ func (c *treeServiceClient) Remove(ctx context.Context, in *RemoveRequest, opts
}
func (c *treeServiceClient) Move(ctx context.Context, in *MoveRequest, opts ...grpc.CallOption) (*MoveResponse, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(MoveResponse)
- err := c.cc.Invoke(ctx, TreeService_Move_FullMethodName, in, out, cOpts...)
+ err := c.cc.Invoke(ctx, TreeService_Move_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -110,9 +106,8 @@ func (c *treeServiceClient) Move(ctx context.Context, in *MoveRequest, opts ...g
}
func (c *treeServiceClient) GetNodeByPath(ctx context.Context, in *GetNodeByPathRequest, opts ...grpc.CallOption) (*GetNodeByPathResponse, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetNodeByPathResponse)
- err := c.cc.Invoke(ctx, TreeService_GetNodeByPath_FullMethodName, in, out, cOpts...)
+ err := c.cc.Invoke(ctx, TreeService_GetNodeByPath_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -120,12 +115,11 @@ func (c *treeServiceClient) GetNodeByPath(ctx context.Context, in *GetNodeByPath
}
func (c *treeServiceClient) GetSubTree(ctx context.Context, in *GetSubTreeRequest, opts ...grpc.CallOption) (TreeService_GetSubTreeClient, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
- stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[0], TreeService_GetSubTree_FullMethodName, cOpts...)
+ stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[0], TreeService_GetSubTree_FullMethodName, opts...)
if err != nil {
return nil, err
}
- x := &treeServiceGetSubTreeClient{ClientStream: stream}
+ x := &treeServiceGetSubTreeClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@@ -153,9 +147,8 @@ func (x *treeServiceGetSubTreeClient) Recv() (*GetSubTreeResponse, error) {
}
func (c *treeServiceClient) TreeList(ctx context.Context, in *TreeListRequest, opts ...grpc.CallOption) (*TreeListResponse, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(TreeListResponse)
- err := c.cc.Invoke(ctx, TreeService_TreeList_FullMethodName, in, out, cOpts...)
+ err := c.cc.Invoke(ctx, TreeService_TreeList_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -163,9 +156,8 @@ func (c *treeServiceClient) TreeList(ctx context.Context, in *TreeListRequest, o
}
func (c *treeServiceClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ApplyResponse)
- err := c.cc.Invoke(ctx, TreeService_Apply_FullMethodName, in, out, cOpts...)
+ err := c.cc.Invoke(ctx, TreeService_Apply_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -173,12 +165,11 @@ func (c *treeServiceClient) Apply(ctx context.Context, in *ApplyRequest, opts ..
}
func (c *treeServiceClient) GetOpLog(ctx context.Context, in *GetOpLogRequest, opts ...grpc.CallOption) (TreeService_GetOpLogClient, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
- stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[1], TreeService_GetOpLog_FullMethodName, cOpts...)
+ stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[1], TreeService_GetOpLog_FullMethodName, opts...)
if err != nil {
return nil, err
}
- x := &treeServiceGetOpLogClient{ClientStream: stream}
+ x := &treeServiceGetOpLogClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@@ -206,9 +197,8 @@ func (x *treeServiceGetOpLogClient) Recv() (*GetOpLogResponse, error) {
}
func (c *treeServiceClient) Healthcheck(ctx context.Context, in *HealthcheckRequest, opts ...grpc.CallOption) (*HealthcheckResponse, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(HealthcheckResponse)
- err := c.cc.Invoke(ctx, TreeService_Healthcheck_FullMethodName, in, out, cOpts...)
+ err := c.cc.Invoke(ctx, TreeService_Healthcheck_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -383,7 +373,7 @@ func _TreeService_GetSubTree_Handler(srv interface{}, stream grpc.ServerStream)
if err := stream.RecvMsg(m); err != nil {
return err
}
- return srv.(TreeServiceServer).GetSubTree(m, &treeServiceGetSubTreeServer{ServerStream: stream})
+ return srv.(TreeServiceServer).GetSubTree(m, &treeServiceGetSubTreeServer{stream})
}
type TreeService_GetSubTreeServer interface {
@@ -440,7 +430,7 @@ func _TreeService_GetOpLog_Handler(srv interface{}, stream grpc.ServerStream) er
if err := stream.RecvMsg(m); err != nil {
return err
}
- return srv.(TreeServiceServer).GetOpLog(m, &treeServiceGetOpLogServer{ServerStream: stream})
+ return srv.(TreeServiceServer).GetOpLog(m, &treeServiceGetOpLogServer{stream})
}
type TreeService_GetOpLogServer interface {
From 36efccd86251a8445e6efe1550d342db10b0230a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 6 Aug 2024 15:45:53 +0300
Subject: [PATCH 024/705] [#1298] writecache: Add `shrink` flag for Seal
command
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/control/writecache.go | 8 +-
internal/logs/logs.go | 1 +
pkg/local_object_storage/engine/writecache.go | 3 +-
pkg/local_object_storage/shard/writecache.go | 3 +-
pkg/local_object_storage/writecache/flush.go | 2 +-
pkg/local_object_storage/writecache/mode.go | 55 ++-
pkg/local_object_storage/writecache/seal.go | 4 +-
.../writecache/writecache.go | 1 +
.../control/server/seal_writecache.go | 1 +
pkg/services/control/service.pb.go | 359 +++++++++---------
pkg/services/control/service.proto | 3 +
pkg/services/control/service_frostfs.pb.go | 2 +
12 files changed, 255 insertions(+), 187 deletions(-)
diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go
index a665ccae8..b725d8471 100644
--- a/cmd/frostfs-cli/modules/control/writecache.go
+++ b/cmd/frostfs-cli/modules/control/writecache.go
@@ -9,7 +9,10 @@ import (
"github.com/spf13/cobra"
)
-const restoreModeFlag = "restore-mode"
+const (
+ restoreModeFlag = "restore-mode"
+ shrinkFlag = "shrink"
+)
var writecacheShardCmd = &cobra.Command{
Use: "writecache",
@@ -29,11 +32,13 @@ func sealWritecache(cmd *cobra.Command, _ []string) {
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
restoreMode, _ := cmd.Flags().GetBool(restoreModeFlag)
+ shrink, _ := cmd.Flags().GetBool(shrinkFlag)
req := &control.SealWriteCacheRequest{Body: &control.SealWriteCacheRequest_Body{
Shard_ID: getShardIDList(cmd),
IgnoreErrors: ignoreErrors,
RestoreMode: restoreMode,
+ Shrink: shrink,
}}
signRequest(cmd, pk, req)
@@ -73,6 +78,7 @@ func initControlShardsWritecacheCmd() {
ff.Bool(shardAllFlag, false, "Process all shards")
ff.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects")
ff.Bool(restoreModeFlag, false, "Restore writecache's mode after sealing")
+ ff.Bool(shrinkFlag, false, "Shrink writecache's internal storage")
sealWritecacheShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 67f173f29..ebb822e1c 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -539,4 +539,5 @@ const (
PolicerCouldNotGetChunk = "could not get EC chunk"
PolicerCouldNotGetChunks = "could not get EC chunks"
AuditEventLogRecord = "audit event log record"
+ WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: database is not empty"
)
diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go
index 8f37d7860..2c5e8cc3a 100644
--- a/pkg/local_object_storage/engine/writecache.go
+++ b/pkg/local_object_storage/engine/writecache.go
@@ -71,6 +71,7 @@ type SealWriteCachePrm struct {
ShardIDs []*shard.ID
IgnoreErrors bool
RestoreMode bool
+ Shrink bool
}
type ShardSealResult struct {
@@ -116,7 +117,7 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr
return nil
}
- err := sh.SealWriteCache(egCtx, shard.SealWriteCachePrm{IgnoreErrors: prm.IgnoreErrors, RestoreMode: prm.RestoreMode})
+ err := sh.SealWriteCache(egCtx, shard.SealWriteCachePrm{IgnoreErrors: prm.IgnoreErrors, RestoreMode: prm.RestoreMode, Shrink: prm.Shrink})
resGuard.Lock()
defer resGuard.Unlock()
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index 9edad7170..c29710930 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -61,6 +61,7 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error
type SealWriteCachePrm struct {
IgnoreErrors bool
RestoreMode bool
+ Shrink bool
}
// SealWriteCache flushes all data from the write-cache and moves it to degraded read only mode.
@@ -87,5 +88,5 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
return ErrDegradedMode
}
- return s.writeCache.Seal(ctx, writecache.SealPrm{IgnoreErrors: p.IgnoreErrors, RestoreMode: p.RestoreMode})
+ return s.writeCache.Seal(ctx, writecache.SealPrm{IgnoreErrors: p.IgnoreErrors, RestoreMode: p.RestoreMode, Shrink: p.Shrink})
}
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index da7feda9a..e34f5a76b 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -291,7 +291,7 @@ func (c *cache) Flush(ctx context.Context, ignoreErrors, seal bool) error {
if seal {
m := c.mode | mode.ReadOnly
- if err := c.setMode(ctx, m, ignoreErrors); err != nil {
+ if err := c.setMode(ctx, m, setModePrm{ignoreErrors: ignoreErrors}); err != nil {
return err
}
c.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index 4172cfbc8..44da9b36e 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -2,16 +2,25 @@ package writecache
import (
"context"
+ "errors"
"fmt"
+ "os"
+ "path/filepath"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
+type setModePrm struct {
+ ignoreErrors bool
+ shrink bool
+}
+
// SetMode sets write-cache mode of operation.
// When shard is put in read-only mode all objects in memory are flushed to disk
// and all background jobs are suspended.
@@ -25,7 +34,7 @@ func (c *cache) SetMode(m mode.Mode) error {
c.modeMtx.Lock()
defer c.modeMtx.Unlock()
- err := c.setMode(ctx, m, true)
+ err := c.setMode(ctx, m, setModePrm{ignoreErrors: true})
if err == nil {
c.metrics.SetMode(mode.ConvertToComponentModeDegraded(m))
}
@@ -33,21 +42,19 @@ func (c *cache) SetMode(m mode.Mode) error {
}
// setMode applies new mode. Must be called with cache.modeMtx lock taken.
-func (c *cache) setMode(ctx context.Context, m mode.Mode, ignoreErrors bool) error {
+func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error {
var err error
turnOffMeta := m.NoMetabase()
if turnOffMeta && !c.mode.NoMetabase() {
- err = c.flush(ctx, ignoreErrors)
+ err = c.flush(ctx, prm.ignoreErrors)
if err != nil {
return err
}
}
- if c.db != nil {
- if err = c.db.Close(); err != nil {
- return fmt.Errorf("can't close write-cache database: %w", err)
- }
+ if err := c.closeDB(prm.shrink); err != nil {
+ return err
}
// Suspend producers to ensure there are channel send operations in fly.
@@ -71,6 +78,40 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, ignoreErrors bool) err
return nil
}
+func (c *cache) closeDB(shrink bool) error {
+ if c.db == nil {
+ return nil
+ }
+ if !shrink {
+ if err := c.db.Close(); err != nil {
+ return fmt.Errorf("can't close write-cache database: %w", err)
+ }
+ return nil
+ }
+
+ var empty bool
+ err := c.db.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(defaultBucket)
+ empty = b == nil || b.Stats().KeyN == 0
+ return nil
+ })
+ if err != nil && !errors.Is(err, bbolt.ErrDatabaseNotOpen) {
+ return fmt.Errorf("failed to check DB items: %w", err)
+ }
+ if err := c.db.Close(); err != nil {
+ return fmt.Errorf("can't close write-cache database: %w", err)
+ }
+ if empty {
+ err := os.Remove(filepath.Join(c.path, dbName))
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("failed to remove DB file: %w", err)
+ }
+ } else {
+ c.log.Info(logs.WritecacheShrinkSkippedNotEmpty)
+ }
+ return nil
+}
+
// readOnly returns true if current mode is read-only.
// `c.modeMtx` must be taken.
func (c *cache) readOnly() bool {
diff --git a/pkg/local_object_storage/writecache/seal.go b/pkg/local_object_storage/writecache/seal.go
index 22b4e0988..fa224f5e0 100644
--- a/pkg/local_object_storage/writecache/seal.go
+++ b/pkg/local_object_storage/writecache/seal.go
@@ -22,13 +22,13 @@ func (c *cache) Seal(ctx context.Context, prm SealPrm) error {
sourceMode := c.mode
// flush will be done by setMode
- err := c.setMode(ctx, mode.DegradedReadOnly, prm.IgnoreErrors)
+ err := c.setMode(ctx, mode.DegradedReadOnly, setModePrm{ignoreErrors: prm.IgnoreErrors, shrink: prm.Shrink})
if err != nil {
return err
}
c.metrics.SetMode(mode.ComponentDisabled)
if prm.RestoreMode {
- err = c.setMode(ctx, sourceMode, prm.IgnoreErrors)
+ err = c.setMode(ctx, sourceMode, setModePrm{ignoreErrors: prm.IgnoreErrors})
if err == nil {
c.metrics.SetMode(mode.ConvertToComponentMode(sourceMode))
}
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
index 7085a57b2..a973df604 100644
--- a/pkg/local_object_storage/writecache/writecache.go
+++ b/pkg/local_object_storage/writecache/writecache.go
@@ -23,6 +23,7 @@ type Info struct {
type SealPrm struct {
IgnoreErrors bool
RestoreMode bool
+ Shrink bool
}
// Cache represents write-cache for objects.
diff --git a/pkg/services/control/server/seal_writecache.go b/pkg/services/control/server/seal_writecache.go
index b663cfc81..697b91918 100644
--- a/pkg/services/control/server/seal_writecache.go
+++ b/pkg/services/control/server/seal_writecache.go
@@ -20,6 +20,7 @@ func (s *Server) SealWriteCache(ctx context.Context, req *control.SealWriteCache
ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()),
IgnoreErrors: req.GetBody().GetIgnoreErrors(),
RestoreMode: req.GetBody().GetRestoreMode(),
+ Shrink: req.GetBody().GetShrink(),
}
res, err := s.s.SealWriteCache(ctx, prm)
diff --git a/pkg/services/control/service.pb.go b/pkg/services/control/service.pb.go
index 895b74368..ac512f1a5 100644
--- a/pkg/services/control/service.pb.go
+++ b/pkg/services/control/service.pb.go
@@ -4729,6 +4729,8 @@ type SealWriteCacheRequest_Body struct {
IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
// If true, then writecache will be sealed, but mode will be restored to the current one.
RestoreMode bool `protobuf:"varint,4,opt,name=restore_mode,json=restoreMode,proto3" json:"restore_mode,omitempty"`
+ // If true, then writecache will shrink internal storage.
+ Shrink bool `protobuf:"varint,5,opt,name=shrink,proto3" json:"shrink,omitempty"`
}
func (x *SealWriteCacheRequest_Body) Reset() {
@@ -4784,6 +4786,13 @@ func (x *SealWriteCacheRequest_Body) GetRestoreMode() bool {
return false
}
+func (x *SealWriteCacheRequest_Body) GetShrink() bool {
+ if x != nil {
+ return x.Shrink
+ }
+ return false
+}
+
type SealWriteCacheResponse_Body struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -5491,7 +5500,7 @@ var file_pkg_services_control_service_proto_rawDesc = []byte{
0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xed, 0x01, 0x0a, 0x15,
+ 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x86, 0x02, 0x0a, 0x15,
0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
@@ -5500,184 +5509,186 @@ var file_pkg_services_control_service_proto_rawDesc = []byte{
0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x1a, 0x69, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72,
- 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f,
- 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x74,
- 0x6f, 0x72, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
- 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0xa9, 0x02, 0x0a, 0x16,
+ 0x1a, 0x81, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65,
+ 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e,
+ 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06,
+ 0x73, 0x68, 0x72, 0x69, 0x6e, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x68,
+ 0x72, 0x69, 0x6e, 0x6b, 0x22, 0xa9, 0x02, 0x0a, 0x16, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69,
+ 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
+ 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0xa2, 0x01, 0x0a, 0x04,
+ 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x1a, 0xa2, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x07, 0x72,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f,
- 0x64, 0x79, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x73, 0x1a, 0x53, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65,
- 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
- 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x74, 0x61,
- 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79,
- 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x88, 0x01, 0x0a, 0x14,
- 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74,
- 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32, 0x8b, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x47, 0x65, 0x74,
- 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61,
- 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x48, 0x0a, 0x0b, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73,
- 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69,
- 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x4b, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65,
- 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a,
- 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65,
- 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68,
- 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63,
- 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45,
- 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76,
- 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72,
- 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47,
- 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x53, 0x0a, 0x06, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49,
+ 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44,
+ 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x22, 0xa1, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
+ 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x49, 0x44, 0x22, 0x88, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a,
+ 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
+ 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32,
+ 0x8b, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f,
+ 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74,
+ 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65,
+ 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x44, 0x72, 0x6f, 0x70,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44,
+ 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
+ 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x53, 0x65, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72,
+ 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54,
+ 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65,
+ 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d,
+ 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x14,
+ 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
+ 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45,
+ 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61,
+ 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x75, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x1a, 0x52, 0x65, 0x73,
- 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63,
- 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65,
- 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x60, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61,
- 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65,
- 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68,
- 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f, 0x63,
- 0x74, 0x6f, 0x72, 0x12, 0x16, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f,
- 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x13, 0x53, 0x74, 0x6f,
+ 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x46,
+ 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44,
+ 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a,
+ 0x15, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76,
+ 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76,
+ 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e,
0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41,
- 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
- 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15,
+ 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69,
+ 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69,
+ 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
- 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
- 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c,
- 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69,
- 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12,
- 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68,
- 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a,
+ 0x17, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f,
+ 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61,
- 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69,
- 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43,
- 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64,
- 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f,
- 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68,
- 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
- 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c,
- 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54,
- 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61,
- 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73,
- 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x72, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c,
- 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x29, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f,
- 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x63,
- 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73,
- 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f,
- 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f,
- 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69,
+ 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x52,
+ 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f,
+ 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63,
+ 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f,
+ 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
+ 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a,
+ 0x21, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d,
+ 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
+ 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x19, 0x4c, 0x69, 0x73,
+ 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
+ 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
+ 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a,
+ 0x0e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12,
+ 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x4b, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
+ 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63,
+ 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a,
+ 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66,
+ 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66,
+ 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index a10410025..486f30a93 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -658,6 +658,9 @@ message SealWriteCacheRequest {
// If true, then writecache will be sealed, but mode will be restored to the current one.
bool restore_mode = 4;
+
+ // If true, then writecache will shrink internal storage.
+ bool shrink = 5;
}
Body body = 1;
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index b6b064973..417d25c05 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -3423,6 +3423,7 @@ func (x *SealWriteCacheRequest_Body) StableSize() (size int) {
size += proto.RepeatedBytesSize(1, x.Shard_ID)
size += proto.BoolSize(2, x.IgnoreErrors)
size += proto.BoolSize(4, x.RestoreMode)
+ size += proto.BoolSize(5, x.Shrink)
return size
}
@@ -3445,6 +3446,7 @@ func (x *SealWriteCacheRequest_Body) StableMarshal(buf []byte) []byte {
offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors)
offset += proto.BoolMarshal(4, buf[offset:], x.RestoreMode)
+ offset += proto.BoolMarshal(5, buf[offset:], x.Shrink)
return buf
}
From 08b1f18bca97c100831b8ded4d9e6633a5aae138 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 6 Aug 2024 15:09:12 +0300
Subject: [PATCH 025/705] [#1296] writecache: Add count limit
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 3 +++
cmd/frostfs-node/config/engine/config_test.go | 3 +++
.../config/engine/shard/writecache/config.go | 19 ++++++++++++++++++
config/example/node.env | 1 +
config/example/node.json | 3 ++-
config/example/node.yaml | 1 +
docs/storage-node-configuration.md | 19 +++++++++---------
.../writecache/options.go | 10 ++++++++++
pkg/local_object_storage/writecache/put.go | 6 ++----
pkg/local_object_storage/writecache/state.go | 20 +++++++++++++------
10 files changed, 65 insertions(+), 20 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 5b91e7819..a3dd80684 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -152,6 +152,7 @@ type shardCfg struct {
maxObjSize uint64
flushWorkerCount int
sizeLimit uint64
+ countLimit uint64
noSync bool
}
@@ -275,6 +276,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
wc.flushWorkerCount = writeCacheCfg.WorkerCount()
wc.sizeLimit = writeCacheCfg.SizeLimit()
+ wc.countLimit = writeCacheCfg.CountLimit()
wc.noSync = writeCacheCfg.NoSync()
}
}
@@ -867,6 +869,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
writecache.WithSmallObjectSize(wcRead.smallObjectSize),
writecache.WithFlushWorkersCount(wcRead.flushWorkerCount),
writecache.WithMaxCacheSize(wcRead.sizeLimit),
+ writecache.WithMaxCacheCount(wcRead.countLimit),
writecache.WithNoSync(wcRead.noSync),
writecache.WithLogger(c.log),
)
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index 7473afefb..da4eeabfd 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -12,6 +12,7 @@ import (
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
+ writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
@@ -78,6 +79,7 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 134217728, wc.MaxObjectSize())
require.EqualValues(t, 30, wc.WorkerCount())
require.EqualValues(t, 3221225472, wc.SizeLimit())
+ require.EqualValues(t, 49, wc.CountLimit())
require.Equal(t, "tmp/0/meta", meta.Path())
require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm())
@@ -133,6 +135,7 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 134217728, wc.MaxObjectSize())
require.EqualValues(t, 30, wc.WorkerCount())
require.EqualValues(t, 4294967296, wc.SizeLimit())
+ require.EqualValues(t, writecacheconfig.CountLimitDefault, wc.CountLimit())
require.Equal(t, "tmp/1/meta", meta.Path())
require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm())
diff --git a/cmd/frostfs-node/config/engine/shard/writecache/config.go b/cmd/frostfs-node/config/engine/shard/writecache/config.go
index 5e31e04ad..bfe8144df 100644
--- a/cmd/frostfs-node/config/engine/shard/writecache/config.go
+++ b/cmd/frostfs-node/config/engine/shard/writecache/config.go
@@ -21,6 +21,9 @@ const (
// SizeLimitDefault is a default write-cache size limit.
SizeLimitDefault = 1 << 30
+
+ // CountLimitDefault is a default write-cache count limit.
+ CountLimitDefault = 0
)
// From wraps config section into Config.
@@ -115,6 +118,22 @@ func (x *Config) SizeLimit() uint64 {
return SizeLimitDefault
}
+// CountLimit returns the value of "max_object_count" config parameter.
+//
+// Returns CountLimitDefault if the value is not a positive number.
+func (x *Config) CountLimit() uint64 {
+ c := config.SizeInBytesSafe(
+ (*config.Config)(x),
+ "max_object_count",
+ )
+
+ if c > 0 {
+ return c
+ }
+
+ return CountLimitDefault
+}
+
// NoSync returns the value of "no_sync" config parameter.
//
// Returns false if the value is not a boolean.
diff --git a/config/example/node.env b/config/example/node.env
index 00190eb39..fc42cc3bf 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -105,6 +105,7 @@ FROSTFS_STORAGE_SHARD_0_WRITECACHE_SMALL_OBJECT_SIZE=16384
FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_SIZE=134217728
FROSTFS_STORAGE_SHARD_0_WRITECACHE_FLUSH_WORKER_COUNT=30
FROSTFS_STORAGE_SHARD_0_WRITECACHE_CAPACITY=3221225472
+FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_COUNT=49
### Metabase config
FROSTFS_STORAGE_SHARD_0_METABASE_PATH=tmp/0/meta
FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644
diff --git a/config/example/node.json b/config/example/node.json
index 9051d2bb7..9aa0dac53 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -148,7 +148,8 @@
"small_object_size": 16384,
"max_object_size": 134217728,
"flush_worker_count": 30,
- "capacity": 3221225472
+ "capacity": 3221225472,
+ "max_object_count": 49
},
"metabase": {
"path": "tmp/0/meta",
diff --git a/config/example/node.yaml b/config/example/node.yaml
index bcc8552b3..2e6affae9 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -171,6 +171,7 @@ storage:
no_sync: true
path: tmp/0/cache # write-cache root directory
capacity: 3221225472 # approximate write-cache total size, bytes
+ max_object_count: 49
metabase:
path: tmp/0/meta # metabase path
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index 4a6e5ba6d..424172ff5 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -292,15 +292,16 @@ writecache:
flush_worker_count: 30
```
-| Parameter | Type | Default value | Description |
-|----------------------|------------|---------------|----------------------------------------------------------------------------------------------------------------------|
-| `path` | `string` | | Path to the metabase file. |
-| `capacity` | `size` | unrestricted | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `small_object_size` | `size` | `32K` | Maximum object size for "small" objects. This objects are stored in a key-value database instead of a file-system. |
-| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
-| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
-| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
-| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
+| Parameter | Type | Default value | Description |
+|----------------------|------------|---------------|-------------------------------------------------------------------------------------------------------------------------------|
+| `path` | `string` | | Path to the metabase file. |
+| `capacity` | `size` | `1G` | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
+| `max_object_count` | `int` | unrestricted | Approximate maximum objects count in the writecache. If the writecache is full, objects are written to the blobstor directly. |
+| `small_object_size` | `size` | `32K` | Maximum object size for "small" objects. This objects are stored in a key-value database instead of a file-system. |
+| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
+| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
+| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
+| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
# `node` section
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index c8eb1bc45..66cd79bea 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -29,6 +29,9 @@ type options struct {
// maxCacheSize is the maximum total size of all objects saved in cache (DB + FS).
// 1 GiB by default.
maxCacheSize uint64
+ // maxCacheCount is the maximum total count of all object saved in cache.
+ // 0 (no limit) by default.
+ maxCacheCount uint64
// objCounters contains atomic counters for the number of objects stored in cache.
objCounters counters
// maxBatchSize is the maximum batch size for the small object database.
@@ -108,6 +111,13 @@ func WithMaxCacheSize(sz uint64) Option {
}
}
+// WithMaxCacheCount sets maximum write-cache objects count.
+func WithMaxCacheCount(v uint64) Option {
+ return func(o *options) {
+ o.maxCacheCount = v
+ }
+}
+
// WithMaxBatchSize sets max batch size for the small object database.
func WithMaxBatchSize(sz int) Option {
return func(o *options) {
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index 0e419f95b..150399de8 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -76,8 +76,7 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro
// putSmall persists small objects to the write-cache database and
// pushes the to the flush workers queue.
func (c *cache) putSmall(obj objectInfo) error {
- cacheSize := c.estimateCacheSize()
- if c.maxCacheSize < c.incSizeDB(cacheSize) {
+ if !c.hasEnoughSpaceDB() {
return ErrOutOfSpace
}
@@ -107,8 +106,7 @@ func (c *cache) putSmall(obj objectInfo) error {
// putBig writes object to FSTree and pushes it to the flush workers queue.
func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) error {
- cacheSz := c.estimateCacheSize()
- if c.maxCacheSize < c.incSizeFS(cacheSz) {
+ if !c.hasEnoughSpaceFS() {
return ErrOutOfSpace
}
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index bc75aaf27..d03f4a63e 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -9,7 +9,7 @@ import (
"go.etcd.io/bbolt"
)
-func (c *cache) estimateCacheSize() uint64 {
+func (c *cache) estimateCacheSize() (uint64, uint64) {
dbCount := c.objCounters.DB()
fsCount := c.objCounters.FS()
if fsCount > 0 {
@@ -19,15 +19,23 @@ func (c *cache) estimateCacheSize() uint64 {
fsSize := fsCount * c.maxObjectSize
c.metrics.SetEstimateSize(dbSize, fsSize)
c.metrics.SetActualCounters(dbCount, fsCount)
- return dbSize + fsSize
+ return dbCount + fsCount, dbSize + fsSize
}
-func (c *cache) incSizeDB(sz uint64) uint64 {
- return sz + c.smallObjectSize
+func (c *cache) hasEnoughSpaceDB() bool {
+ return c.hasEnoughSpace(c.smallObjectSize)
}
-func (c *cache) incSizeFS(sz uint64) uint64 {
- return sz + c.maxObjectSize
+func (c *cache) hasEnoughSpaceFS() bool {
+ return c.hasEnoughSpace(c.maxObjectSize)
+}
+
+func (c *cache) hasEnoughSpace(objectSize uint64) bool {
+ count, size := c.estimateCacheSize()
+ if c.maxCacheCount > 0 && count+1 > c.maxCacheCount {
+ return false
+ }
+ return c.maxCacheSize >= size+objectSize
}
var _ fstree.FileCounter = &counters{}
From c985b1198f8ed2ec1ecb6e239503ff4ad3f59439 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 9 Aug 2024 10:38:45 +0300
Subject: [PATCH 026/705] [#1302] putSvc: Override SuccessAfter for non-regular
objects in EC containers
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/single.go | 4 ++++
pkg/services/object/put/streamer.go | 16 ++++++++++++----
2 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 6d2f3dba8..9fa8ddb67 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -19,6 +19,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -229,6 +230,9 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
if len(copiesNumber) > 0 && !result.isEC {
result.placementOptions = append(result.placementOptions, placement.WithCopyNumbers(copiesNumber))
}
+ if container.IsECContainer(cnrInfo.Value) && !object.IsECSupported(obj) && !localOnly {
+ result.placementOptions = append(result.placementOptions, placement.SuccessAfter(uint32(policy.ECParityCount(cnrInfo.Value.PlacementPolicy())+1)))
+ }
result.placementOptions = append(result.placementOptions, placement.ForContainer(cnrInfo.Value))
objID, ok := obj.ID()
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 14dae38d5..4e655ed54 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
pkgutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
@@ -212,10 +213,10 @@ func (p *Streamer) newObjectWriter(prm *PutInitPrm) transformer.ObjectWriter {
if container.IsECContainer(prm.cnr) && object.IsECSupported(prm.hdr) {
return p.newECWriter(prm)
}
- return p.newDefaultObjectWriter(prm)
+ return p.newDefaultObjectWriter(prm, false)
}
-func (p *Streamer) newDefaultObjectWriter(prm *PutInitPrm) transformer.ObjectWriter {
+func (p *Streamer) newDefaultObjectWriter(prm *PutInitPrm, forECPlacement bool) transformer.ObjectWriter {
var relay func(context.Context, nodeDesc) error
if p.relay != nil {
relay = func(ctx context.Context, node nodeDesc) error {
@@ -232,9 +233,16 @@ func (p *Streamer) newDefaultObjectWriter(prm *PutInitPrm) transformer.ObjectWri
}
}
+ traverseOpts := prm.traverseOpts
+ if forECPlacement && !prm.common.LocalOnly() {
+ // save non-regular and linking object to EC container.
+ // EC 2.1 -> REP 2, EC 2.2 -> REP 3 etc.
+ traverseOpts = append(traverseOpts, placement.SuccessAfter(uint32(policy.ECParityCount(prm.cnr.PlacementPolicy())+1)))
+ }
+
return &distributedTarget{
cfg: p.cfg,
- placementOpts: prm.traverseOpts,
+ placementOpts: traverseOpts,
nodeTargetInitializer: func(node nodeDesc) preparedObjectTarget {
if node.local {
return localTarget{
@@ -266,7 +274,7 @@ func (p *Streamer) newECWriter(prm *PutInitPrm) transformer.ObjectWriter {
commonPrm: prm.common,
relay: p.relay,
},
- repWriter: p.newDefaultObjectWriter(prm),
+ repWriter: p.newDefaultObjectWriter(prm, true),
}
}
From fa82854af4c0ec18a058eabede3da35715061125 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 8 Aug 2024 13:15:22 +0300
Subject: [PATCH 027/705] [#1302] writecache: Add put->flush->put benchmark
Signed-off-by: Dmitrii Stepanov
---
.../writecache/benchmark/writecache_test.go | 23 +++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
index c1c0e88b3..4f4398452 100644
--- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -2,6 +2,7 @@ package benchmark
import (
"context"
+ "fmt"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -27,6 +28,24 @@ func BenchmarkWritecachePar(b *testing.B) {
})
}
+func BenchmarkWriteAfterDelete(b *testing.B) {
+ const payloadSize = 32 << 10
+ const parallel = 25
+
+ cache := newCache(b)
+ benchmarkPutPrepare(b, cache)
+ b.Run(fmt.Sprintf("%dB_before", payloadSize), func(b *testing.B) {
+ b.SetParallelism(parallel)
+ benchmarkRunPar(b, cache, payloadSize)
+ })
+ require.NoError(b, cache.Flush(context.Background(), false, false))
+ b.Run(fmt.Sprintf("%dB_after", payloadSize), func(b *testing.B) {
+ b.SetParallelism(parallel)
+ benchmarkRunPar(b, cache, payloadSize)
+ })
+ require.NoError(b, cache.Close())
+}
+
func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
defer func() { require.NoError(b, cache.Close()) }()
@@ -54,6 +73,10 @@ func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
benchmarkPutPrepare(b, cache)
defer func() { require.NoError(b, cache.Close()) }()
+ benchmarkRunPar(b, cache, size)
+}
+
+func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) {
ctx := context.Background()
b.ResetTimer()
From 68029d756e59d036b11b95153d70e64588ee720e Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 8 Aug 2024 13:32:18 +0300
Subject: [PATCH 028/705] [#1302] writecache: Allow to specify custom page size
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-lens/internal/writecache/inspect.go | 2 +-
cmd/frostfs-lens/internal/writecache/list.go | 2 +-
cmd/frostfs-node/config.go | 3 +++
cmd/frostfs-node/config/engine/config_test.go | 2 ++
cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go | 11 +++++++++++
config/example/node.env | 1 +
config/example/node.json | 1 +
config/example/node.yaml | 1 +
docs/storage-node-configuration.md | 2 ++
pkg/local_object_storage/writecache/options.go | 9 +++++++++
pkg/local_object_storage/writecache/storage.go | 2 +-
pkg/local_object_storage/writecache/util.go | 3 ++-
12 files changed, 35 insertions(+), 4 deletions(-)
diff --git a/cmd/frostfs-lens/internal/writecache/inspect.go b/cmd/frostfs-lens/internal/writecache/inspect.go
index afc986c8b..63c669a35 100644
--- a/cmd/frostfs-lens/internal/writecache/inspect.go
+++ b/cmd/frostfs-lens/internal/writecache/inspect.go
@@ -25,7 +25,7 @@ func init() {
func inspectFunc(cmd *cobra.Command, _ []string) {
var data []byte
- db, err := writecache.OpenDB(vPath, true, os.OpenFile)
+ db, err := writecache.OpenDB(vPath, true, os.OpenFile, 0)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
defer db.Close()
diff --git a/cmd/frostfs-lens/internal/writecache/list.go b/cmd/frostfs-lens/internal/writecache/list.go
index bcbae0ec9..9c8fa6138 100644
--- a/cmd/frostfs-lens/internal/writecache/list.go
+++ b/cmd/frostfs-lens/internal/writecache/list.go
@@ -31,7 +31,7 @@ func listFunc(cmd *cobra.Command, _ []string) {
return err
}
- db, err := writecache.OpenDB(vPath, true, os.OpenFile)
+ db, err := writecache.OpenDB(vPath, true, os.OpenFile, 0)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
defer db.Close()
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index a3dd80684..1af27d733 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -154,6 +154,7 @@ type shardCfg struct {
sizeLimit uint64
countLimit uint64
noSync bool
+ pageSize int
}
piloramaCfg struct {
@@ -272,6 +273,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
wc.path = writeCacheCfg.Path()
wc.maxBatchSize = writeCacheCfg.BoltDB().MaxBatchSize()
wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
+ wc.pageSize = writeCacheCfg.BoltDB().PageSize()
wc.maxObjSize = writeCacheCfg.MaxObjectSize()
wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
wc.flushWorkerCount = writeCacheCfg.WorkerCount()
@@ -865,6 +867,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
writecache.WithPath(wcRead.path),
writecache.WithMaxBatchSize(wcRead.maxBatchSize),
writecache.WithMaxBatchDelay(wcRead.maxBatchDelay),
+ writecache.WithPageSize(wcRead.pageSize),
writecache.WithMaxObjectSize(wcRead.maxObjSize),
writecache.WithSmallObjectSize(wcRead.smallObjectSize),
writecache.WithFlushWorkersCount(wcRead.flushWorkerCount),
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index da4eeabfd..d53207ccc 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -79,6 +79,7 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 134217728, wc.MaxObjectSize())
require.EqualValues(t, 30, wc.WorkerCount())
require.EqualValues(t, 3221225472, wc.SizeLimit())
+ require.EqualValues(t, 4096, wc.BoltDB().PageSize())
require.EqualValues(t, 49, wc.CountLimit())
require.Equal(t, "tmp/0/meta", meta.Path())
@@ -135,6 +136,7 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 134217728, wc.MaxObjectSize())
require.EqualValues(t, 30, wc.WorkerCount())
require.EqualValues(t, 4294967296, wc.SizeLimit())
+ require.EqualValues(t, 0, wc.BoltDB().PageSize())
require.EqualValues(t, writecacheconfig.CountLimitDefault, wc.CountLimit())
require.Equal(t, "tmp/1/meta", meta.Path())
diff --git a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
index 9e334cd8f..a51308b5b 100644
--- a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
+++ b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go
@@ -60,3 +60,14 @@ func (x *Config) MaxBatchSize() int {
func (x *Config) NoSync() bool {
return config.BoolSafe((*config.Config)(x), "no_sync")
}
+
+// PageSize returns the value of "page_size" config parameter.
+//
+// Returns 0 if the value is not a positive number.
+func (x *Config) PageSize() int {
+ s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
+ if s < 0 {
+ s = 0
+ }
+ return s
+}
diff --git a/config/example/node.env b/config/example/node.env
index fc42cc3bf..b39423ffb 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -105,6 +105,7 @@ FROSTFS_STORAGE_SHARD_0_WRITECACHE_SMALL_OBJECT_SIZE=16384
FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_SIZE=134217728
FROSTFS_STORAGE_SHARD_0_WRITECACHE_FLUSH_WORKER_COUNT=30
FROSTFS_STORAGE_SHARD_0_WRITECACHE_CAPACITY=3221225472
+FROSTFS_STORAGE_SHARD_0_WRITECACHE_PAGE_SIZE=4096
FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_COUNT=49
### Metabase config
FROSTFS_STORAGE_SHARD_0_METABASE_PATH=tmp/0/meta
diff --git a/config/example/node.json b/config/example/node.json
index 9aa0dac53..fe2de0e01 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -149,6 +149,7 @@
"max_object_size": 134217728,
"flush_worker_count": 30,
"capacity": 3221225472,
+ "page_size": 4096,
"max_object_count": 49
},
"metabase": {
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 2e6affae9..cc339a427 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -172,6 +172,7 @@ storage:
path: tmp/0/cache # write-cache root directory
capacity: 3221225472 # approximate write-cache total size, bytes
max_object_count: 49
+ page_size: 4k
metabase:
path: tmp/0/meta # metabase path
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index 424172ff5..5bf35cd65 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -290,6 +290,7 @@ writecache:
small_object_size: 16384
max_object_size: 134217728
flush_worker_count: 30
+ page_size: '4k'
```
| Parameter | Type | Default value | Description |
@@ -302,6 +303,7 @@ writecache:
| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
+| `page_size` | `size` | `0` | Page size overrides the default OS page size for small objects storage. Does not affect the existing storage. |
# `node` section
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 66cd79bea..980cf9303 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -48,6 +48,8 @@ type options struct {
metrics Metrics
// disableBackgroundFlush is for testing purposes only.
disableBackgroundFlush bool
+ // pageSize is bbolt's page size config value
+ pageSize int
}
// WithLogger sets logger.
@@ -173,3 +175,10 @@ func WithDisableBackgroundFlush() Option {
o.disableBackgroundFlush = true
}
}
+
+// WithPageSize sets bbolt's page size.
+func WithPageSize(s int) Option {
+ return func(o *options) {
+ o.pageSize = s
+ }
+}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index caf997af8..57021cc17 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -32,7 +32,7 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
return err
}
- c.db, err = OpenDB(c.path, mod.ReadOnly(), c.openFile)
+ c.db, err = OpenDB(c.path, mod.ReadOnly(), c.openFile, c.pageSize)
if err != nil {
return fmt.Errorf("could not open database: %w", err)
}
diff --git a/pkg/local_object_storage/writecache/util.go b/pkg/local_object_storage/writecache/util.go
index 0ed4a954e..ad3b443f3 100644
--- a/pkg/local_object_storage/writecache/util.go
+++ b/pkg/local_object_storage/writecache/util.go
@@ -10,11 +10,12 @@ import (
)
// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
-func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
+func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error), pageSize int) (*bbolt.DB, error) {
return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
NoFreelistSync: true,
ReadOnly: ro,
Timeout: 100 * time.Millisecond,
OpenFile: openFile,
+ PageSize: pageSize,
})
}
From 93d63e1632f54869ca1c155cdd11d2c9966c3090 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 31 Jul 2024 16:30:07 +0300
Subject: [PATCH 029/705] [#1284] writecache: Allow to seal writecache async
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/control/writecache.go | 4 +
internal/logs/logs.go | 3 +
pkg/local_object_storage/engine/writecache.go | 3 +-
pkg/local_object_storage/shard/control.go | 6 +
pkg/local_object_storage/shard/shard.go | 6 +-
pkg/local_object_storage/shard/writecache.go | 54 ++-
.../control/server/seal_writecache.go | 1 +
pkg/services/control/service.pb.go | 340 +++++++++---------
pkg/services/control/service.proto | 3 +
pkg/services/control/service_frostfs.pb.go | 2 +
10 files changed, 252 insertions(+), 170 deletions(-)
diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go
index b725d8471..ffe9009ab 100644
--- a/cmd/frostfs-cli/modules/control/writecache.go
+++ b/cmd/frostfs-cli/modules/control/writecache.go
@@ -10,6 +10,7 @@ import (
)
const (
+ asyncFlag = "async"
restoreModeFlag = "restore-mode"
shrinkFlag = "shrink"
)
@@ -31,12 +32,14 @@ func sealWritecache(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
+ async, _ := cmd.Flags().GetBool(asyncFlag)
restoreMode, _ := cmd.Flags().GetBool(restoreModeFlag)
shrink, _ := cmd.Flags().GetBool(shrinkFlag)
req := &control.SealWriteCacheRequest{Body: &control.SealWriteCacheRequest_Body{
Shard_ID: getShardIDList(cmd),
IgnoreErrors: ignoreErrors,
+ Async: async,
RestoreMode: restoreMode,
Shrink: shrink,
}}
@@ -77,6 +80,7 @@ func initControlShardsWritecacheCmd() {
ff.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
ff.Bool(shardAllFlag, false, "Process all shards")
ff.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects")
+ ff.Bool(asyncFlag, false, "Run operation in background")
ff.Bool(restoreModeFlag, false, "Restore writecache's mode after sealing")
ff.Bool(shrinkFlag, false, "Shrink writecache's internal storage")
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index ebb822e1c..78bcd0c0e 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -539,5 +539,8 @@ const (
PolicerCouldNotGetChunk = "could not get EC chunk"
PolicerCouldNotGetChunks = "could not get EC chunks"
AuditEventLogRecord = "audit event log record"
+ StartedWritecacheSealAsync = "started writecache seal async"
+ WritecacheSealCompletedAsync = "writecache seal completed successfully"
+ FailedToSealWritecacheAsync = "failed to seal writecache async"
WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: database is not empty"
)
diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go
index 2c5e8cc3a..3e8f387ef 100644
--- a/pkg/local_object_storage/engine/writecache.go
+++ b/pkg/local_object_storage/engine/writecache.go
@@ -70,6 +70,7 @@ func (e *StorageEngine) FlushWriteCache(ctx context.Context, p FlushWriteCachePr
type SealWriteCachePrm struct {
ShardIDs []*shard.ID
IgnoreErrors bool
+ Async bool
RestoreMode bool
Shrink bool
}
@@ -117,7 +118,7 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr
return nil
}
- err := sh.SealWriteCache(egCtx, shard.SealWriteCachePrm{IgnoreErrors: prm.IgnoreErrors, RestoreMode: prm.RestoreMode, Shrink: prm.Shrink})
+ err := sh.SealWriteCache(egCtx, shard.SealWriteCachePrm{IgnoreErrors: prm.IgnoreErrors, Async: prm.Async, RestoreMode: prm.RestoreMode, Shrink: prm.Shrink})
resGuard.Lock()
defer resGuard.Unlock()
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 90d7afdd4..210744702 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -178,6 +178,7 @@ func (s *Shard) Init(ctx context.Context) error {
if !m.NoMetabase() {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}
+ s.writecacheSealCancel.Store(dummyCancel)
return nil
}
@@ -350,6 +351,8 @@ func (s *Shard) Close() error {
}
if s.hasWriteCache() {
+ prev := s.writecacheSealCancel.Swap(notInitializedCancel)
+ prev.cancel() // no need to wait: writecache.Seal and writecache.Close lock the same mutex
components = append(components, s.writeCache)
}
@@ -428,6 +431,9 @@ func (s *Shard) lockExclusive() func() {
cancelGC := val.(context.CancelFunc)
cancelGC()
}
+ if c := s.writecacheSealCancel.Load(); c != nil {
+ c.cancel()
+ }
s.m.Lock()
s.setModeRequested.Store(false)
return s.m.Unlock
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 94f22feb5..93f5354a7 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -37,8 +37,9 @@ type Shard struct {
rb *rebuilder
- gcCancel atomic.Value
- setModeRequested atomic.Bool
+ gcCancel atomic.Value
+ setModeRequested atomic.Bool
+ writecacheSealCancel atomic.Pointer[writecacheSealCanceler]
}
// Option represents Shard's constructor option.
@@ -190,6 +191,7 @@ func New(opts ...Option) *Shard {
}
s.fillInfo()
+ s.writecacheSealCancel.Store(notInitializedCancel)
return s
}
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index c29710930..a6de07f03 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -4,12 +4,24 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
)
+var (
+ dummyCancel = &writecacheSealCanceler{cancel: func() {}}
+ notInitializedCancel = &writecacheSealCanceler{cancel: func() {}}
+ errWriteCacheSealing = errors.New("writecache is already sealing or shard is not initialized")
+)
+
+type writecacheSealCanceler struct {
+ cancel context.CancelFunc
+}
+
// FlushWriteCachePrm represents parameters of a `FlushWriteCache` operation.
type FlushWriteCachePrm struct {
ignoreErrors bool
@@ -60,6 +72,7 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error
type SealWriteCachePrm struct {
IgnoreErrors bool
+ Async bool
RestoreMode bool
Shrink bool
}
@@ -78,15 +91,52 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error {
return errWriteCacheDisabled
}
+ if p.Async {
+ ctx = context.WithoutCancel(ctx)
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ canceler := &writecacheSealCanceler{cancel: cancel}
+ if !s.writecacheSealCancel.CompareAndSwap(dummyCancel, canceler) {
+ return errWriteCacheSealing
+ }
s.m.RLock()
- defer s.m.RUnlock()
+ cleanup := func() {
+ s.m.RUnlock()
+ s.writecacheSealCancel.Store(dummyCancel)
+ }
if s.info.Mode.ReadOnly() {
+ cleanup()
return ErrReadOnlyMode
}
if s.info.Mode.NoMetabase() {
+ cleanup()
return ErrDegradedMode
}
- return s.writeCache.Seal(ctx, writecache.SealPrm{IgnoreErrors: p.IgnoreErrors, RestoreMode: p.RestoreMode, Shrink: p.Shrink})
+ if !p.Async {
+ defer cleanup()
+ }
+ prm := writecache.SealPrm{IgnoreErrors: p.IgnoreErrors, RestoreMode: p.RestoreMode, Shrink: p.Shrink}
+ if p.Async {
+ started := make(chan struct{})
+ go func() {
+ close(started)
+ defer cleanup()
+
+ s.log.Info(logs.StartedWritecacheSealAsync)
+ if err := s.writeCache.Seal(ctx, prm); err != nil {
+ s.log.Warn(logs.FailedToSealWritecacheAsync, zap.Error(err))
+ return
+ }
+ s.log.Info(logs.WritecacheSealCompletedAsync)
+ }()
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-started:
+ return nil
+ }
+ }
+ return s.writeCache.Seal(ctx, prm)
}
diff --git a/pkg/services/control/server/seal_writecache.go b/pkg/services/control/server/seal_writecache.go
index 697b91918..1737677b7 100644
--- a/pkg/services/control/server/seal_writecache.go
+++ b/pkg/services/control/server/seal_writecache.go
@@ -19,6 +19,7 @@ func (s *Server) SealWriteCache(ctx context.Context, req *control.SealWriteCache
prm := engine.SealWriteCachePrm{
ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()),
IgnoreErrors: req.GetBody().GetIgnoreErrors(),
+ Async: req.GetBody().GetAsync(),
RestoreMode: req.GetBody().GetRestoreMode(),
Shrink: req.GetBody().GetShrink(),
}
diff --git a/pkg/services/control/service.pb.go b/pkg/services/control/service.pb.go
index ac512f1a5..e5a5ce24c 100644
--- a/pkg/services/control/service.pb.go
+++ b/pkg/services/control/service.pb.go
@@ -4727,6 +4727,8 @@ type SealWriteCacheRequest_Body struct {
Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
// Flag indicating whether object read errors should be ignored.
IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
+ // Flag indicating whether writecache will be sealed async.
+ Async bool `protobuf:"varint,3,opt,name=async,proto3" json:"async,omitempty"`
// If true, then writecache will be sealed, but mode will be restored to the current one.
RestoreMode bool `protobuf:"varint,4,opt,name=restore_mode,json=restoreMode,proto3" json:"restore_mode,omitempty"`
// If true, then writecache will shrink internal storage.
@@ -4779,6 +4781,13 @@ func (x *SealWriteCacheRequest_Body) GetIgnoreErrors() bool {
return false
}
+func (x *SealWriteCacheRequest_Body) GetAsync() bool {
+ if x != nil {
+ return x.Async
+ }
+ return false
+}
+
func (x *SealWriteCacheRequest_Body) GetRestoreMode() bool {
if x != nil {
return x.RestoreMode
@@ -5500,7 +5509,7 @@ var file_pkg_services_control_service_proto_rawDesc = []byte{
0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x86, 0x02, 0x0a, 0x15,
+ 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x9c, 0x02, 0x0a, 0x15,
0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
@@ -5509,186 +5518,187 @@ var file_pkg_services_control_service_proto_rawDesc = []byte{
0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x1a, 0x81, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
+ 0x1a, 0x97, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61,
0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65,
0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e,
- 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x73,
- 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06,
- 0x73, 0x68, 0x72, 0x69, 0x6e, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x68,
- 0x72, 0x69, 0x6e, 0x6b, 0x22, 0xa9, 0x02, 0x0a, 0x16, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69,
- 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0xa2, 0x01, 0x0a, 0x04,
- 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x53, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x53, 0x0a, 0x06, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49,
- 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44,
- 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72,
- 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
- 0x22, 0xa1, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
+ 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x73, 0x79,
+ 0x6e, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x12,
+ 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4d, 0x6f,
+ 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x72, 0x69, 0x6e, 0x6b, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x06, 0x73, 0x68, 0x72, 0x69, 0x6e, 0x6b, 0x22, 0xa9, 0x02, 0x0a, 0x16, 0x53,
+ 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
+ 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x49, 0x44, 0x22, 0x88, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
+ 0x65, 0x1a, 0xa2, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x07, 0x72, 0x65,
+ 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43,
+ 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64,
+ 0x79, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x1a, 0x53, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x74, 0x61, 0x63,
+ 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35,
+ 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32,
- 0x8b, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
- 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c,
- 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
- 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f,
- 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
- 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74,
- 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65,
- 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
+ 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x88, 0x01, 0x0a, 0x14, 0x44,
+ 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61,
+ 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a,
+ 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32, 0x8b, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53,
0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x44, 0x72, 0x6f, 0x70,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44,
- 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
- 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x53, 0x65, 0x74,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72,
- 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54,
- 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65,
- 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d,
- 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x14,
- 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e,
+ 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48,
+ 0x0a, 0x0b, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73,
+ 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x4b, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12,
+ 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f,
+ 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12,
+ 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72,
+ 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68,
+ 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76,
+ 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61,
+ 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x63, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x63, 0x6f, 0x6e,
0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45,
- 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61,
- 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x2e,
+ 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x12, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65,
+ 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e,
0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65,
+ 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73,
+ 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x60, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63,
+ 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45,
+ 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12,
+ 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43,
+ 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f, 0x63, 0x74,
+ 0x6f, 0x72, 0x12, 0x16, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63,
+ 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64,
+ 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72,
+ 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x47,
+ 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
+ 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47,
+ 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
+ 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e,
+ 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x27,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61,
+ 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e,
+ 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68,
+ 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x75, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x13, 0x53, 0x74, 0x6f,
- 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x46,
- 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44,
- 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a,
- 0x15, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76,
- 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76,
- 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69,
- 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69,
- 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
- 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a,
- 0x17, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f,
- 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61,
- 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69,
- 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x52,
- 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f,
- 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63,
- 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
- 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a,
- 0x21, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61,
- 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d,
- 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
- 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
- 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x19, 0x4c, 0x69, 0x73,
- 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
- 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61,
- 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
- 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a,
- 0x0e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12,
- 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x4b, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
- 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63,
- 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a,
- 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66,
- 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66,
- 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61,
+ 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73,
+ 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61,
+ 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69,
+ 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42,
+ 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x72, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63,
+ 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65,
+ 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44,
+ 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74,
+ 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75,
+ 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64,
+ 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 486f30a93..d6639cb48 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -656,6 +656,9 @@ message SealWriteCacheRequest {
// Flag indicating whether object read errors should be ignored.
bool ignore_errors = 2;
+ // Flag indicating whether writecache will be sealed async.
+ bool async = 3;
+
// If true, then writecache will be sealed, but mode will be restored to the current one.
bool restore_mode = 4;
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 417d25c05..822244e77 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -3422,6 +3422,7 @@ func (x *SealWriteCacheRequest_Body) StableSize() (size int) {
}
size += proto.RepeatedBytesSize(1, x.Shard_ID)
size += proto.BoolSize(2, x.IgnoreErrors)
+ size += proto.BoolSize(3, x.Async)
size += proto.BoolSize(4, x.RestoreMode)
size += proto.BoolSize(5, x.Shrink)
return size
@@ -3445,6 +3446,7 @@ func (x *SealWriteCacheRequest_Body) StableMarshal(buf []byte) []byte {
var offset int
offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors)
+ offset += proto.BoolMarshal(3, buf[offset:], x.Async)
offset += proto.BoolMarshal(4, buf[offset:], x.RestoreMode)
offset += proto.BoolMarshal(5, buf[offset:], x.Shrink)
return buf
From 80ce7c3a0095e707e44accf30be6bf7f61bfacc4 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 31 Jul 2024 16:33:31 +0300
Subject: [PATCH 030/705] [#1284] shard: Resolve funlen linter
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/shard/control.go | 71 +++++++++++++----------
1 file changed, 39 insertions(+), 32 deletions(-)
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 210744702..936a506c0 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -99,12 +99,50 @@ func (x *metabaseSynchronizer) Init() error {
// Init initializes all Shard's components.
func (s *Shard) Init(ctx context.Context) error {
+ m := s.GetMode()
+ if err := s.initializeComponents(m); err != nil {
+ return err
+ }
+
+ s.updateMetrics(ctx)
+
+ s.gc = &gc{
+ gcCfg: &s.gcCfg,
+ remover: s.removeGarbage,
+ stopChannel: make(chan struct{}),
+ eventChan: make(chan Event),
+ mEventHandler: map[eventType]*eventHandlers{
+ eventNewEpoch: {
+ cancelFunc: func() {},
+ handlers: []eventHandler{
+ s.collectExpiredLocks,
+ s.collectExpiredObjects,
+ s.collectExpiredTombstones,
+ s.collectExpiredMetrics,
+ },
+ },
+ },
+ }
+ if s.gc.metrics != nil {
+ s.gc.metrics.SetShardID(s.info.ID.String())
+ }
+
+ s.gc.init(ctx)
+
+ s.rb = newRebuilder(s.rebuildLimiter)
+ if !m.NoMetabase() {
+ s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
+ }
+ s.writecacheSealCancel.Store(dummyCancel)
+ return nil
+}
+
+func (s *Shard) initializeComponents(m mode.Mode) error {
type initializer interface {
Init() error
}
var components []initializer
- m := s.GetMode()
if !m.NoMetabase() {
var initMetabase initializer
@@ -148,37 +186,6 @@ func (s *Shard) Init(ctx context.Context) error {
return fmt.Errorf("could not initialize %T: %w", component, err)
}
}
-
- s.updateMetrics(ctx)
-
- s.gc = &gc{
- gcCfg: &s.gcCfg,
- remover: s.removeGarbage,
- stopChannel: make(chan struct{}),
- eventChan: make(chan Event),
- mEventHandler: map[eventType]*eventHandlers{
- eventNewEpoch: {
- cancelFunc: func() {},
- handlers: []eventHandler{
- s.collectExpiredLocks,
- s.collectExpiredObjects,
- s.collectExpiredTombstones,
- s.collectExpiredMetrics,
- },
- },
- },
- }
- if s.gc.metrics != nil {
- s.gc.metrics.SetShardID(s.info.ID.String())
- }
-
- s.gc.init(ctx)
-
- s.rb = newRebuilder(s.rebuildLimiter)
- if !m.NoMetabase() {
- s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
- }
- s.writecacheSealCancel.Store(dummyCancel)
return nil
}
From d055168e2a9651c357e4358c1dcc392bb4ae4718 Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Wed, 14 Aug 2024 13:47:50 +0300
Subject: [PATCH 031/705] [#1135] ir: Add healthstatus RECONFIGURING
Signed-off-by: Ekaterina Lebedeva
---
cmd/frostfs-ir/config.go | 6 ++++++
go.mod | 6 +++---
go.sum | 12 ++++++------
pkg/innerring/state.go | 12 ++++++++++++
pkg/services/control/ir/types.pb.go | 18 ++++++++++++------
pkg/services/control/ir/types.proto | 3 +++
6 files changed, 42 insertions(+), 15 deletions(-)
diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go
index 955195477..4eaac845c 100644
--- a/cmd/frostfs-ir/config.go
+++ b/cmd/frostfs-ir/config.go
@@ -7,6 +7,7 @@ import (
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
"github.com/spf13/viper"
"go.uber.org/zap"
)
@@ -81,6 +82,10 @@ func watchForSignal(cancel func()) {
return
case <-sighupCh:
log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
+ if !innerRing.CompareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
+ log.Info(logs.FrostFSNodeSIGHUPSkip)
+ break
+ }
err := reloadConfig()
if err != nil {
log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
@@ -92,6 +97,7 @@ func watchForSignal(cancel func()) {
if err != nil {
log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
+ innerRing.CompareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
}
diff --git a/go.mod b/go.mod
index 09a098502..196b4d463 100644
--- a/go.mod
+++ b/go.mod
@@ -59,7 +59,7 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
@@ -120,8 +120,8 @@ require (
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/text v0.16.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
diff --git a/go.sum b/go.sum
index 1034ff61f..bd6d85882 100644
--- a/go.sum
+++ b/go.sum
@@ -33,8 +33,8 @@ github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJR
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo=
github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -380,10 +380,10 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
+google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go
index 1616dbb9f..d3071faad 100644
--- a/pkg/innerring/state.go
+++ b/pkg/innerring/state.go
@@ -161,6 +161,16 @@ func (s *Server) setHealthStatus(hs control.HealthStatus) {
}
}
+func (s *Server) CompareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
+ if swapped = s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
+ s.notifySystemd(newSt)
+ if s.irMetrics != nil {
+ s.irMetrics.SetHealth(int32(newSt))
+ }
+ }
+ return
+}
+
// HealthStatus returns the current health status of the IR application.
func (s *Server) HealthStatus() control.HealthStatus {
return control.HealthStatus(s.healthStatus.Load())
@@ -186,6 +196,8 @@ func (s *Server) notifySystemd(st control.HealthStatus) {
err = sdnotify.FlagAndStatus(sdnotify.ReadyEnabled)
case control.HealthStatus_SHUTTING_DOWN:
err = sdnotify.FlagAndStatus(sdnotify.StoppingEnabled)
+ case control.HealthStatus_RECONFIGURING:
+ err = sdnotify.FlagAndStatus(sdnotify.ReloadingEnabled)
default:
err = sdnotify.Status(fmt.Sprintf("%v", st))
}
diff --git a/pkg/services/control/ir/types.pb.go b/pkg/services/control/ir/types.pb.go
index 828814b25..840e0be67 100644
--- a/pkg/services/control/ir/types.pb.go
+++ b/pkg/services/control/ir/types.pb.go
@@ -32,6 +32,8 @@ const (
HealthStatus_READY HealthStatus = 2
// IR application is shutting down.
HealthStatus_SHUTTING_DOWN HealthStatus = 3
+ // IR application is reconfiguring.
+ HealthStatus_RECONFIGURING HealthStatus = 4
)
// Enum value maps for HealthStatus.
@@ -41,12 +43,14 @@ var (
1: "STARTING",
2: "READY",
3: "SHUTTING_DOWN",
+ 4: "RECONFIGURING",
}
HealthStatus_value = map[string]int32{
"HEALTH_STATUS_UNDEFINED": 0,
"STARTING": 1,
"READY": 2,
"SHUTTING_DOWN": 3,
+ "RECONFIGURING": 4,
}
)
@@ -144,17 +148,19 @@ var file_pkg_services_control_ir_types_proto_rawDesc = []byte{
0x22, 0x36, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a,
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
0x17, 0x0a, 0x04, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2a, 0x57, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2a, 0x6a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c,
0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x17, 0x48, 0x45, 0x41, 0x4c,
0x54, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49,
0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e,
0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x12, 0x11,
0x0a, 0x0d, 0x53, 0x48, 0x55, 0x54, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x10,
- 0x03, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73,
- 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c,
- 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
- 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x72, 0x2f,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49,
+ 0x4e, 0x47, 0x10, 0x04, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73,
+ 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f,
+ 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
+ 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
}
var (
diff --git a/pkg/services/control/ir/types.proto b/pkg/services/control/ir/types.proto
index 9b6731cf8..901a55918 100644
--- a/pkg/services/control/ir/types.proto
+++ b/pkg/services/control/ir/types.proto
@@ -26,4 +26,7 @@ enum HealthStatus {
// IR application is shutting down.
SHUTTING_DOWN = 3;
+
+ // IR application is reconfiguring.
+ RECONFIGURING = 4;
}
From 5da41f1fe55f6bb906f0e05d2539ccf9a21cdfa6 Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Wed, 14 Aug 2024 13:55:34 +0300
Subject: [PATCH 032/705] Revert "[#1262] sdnotify: Get rid of go:linkname for
nanotime"
This reverts commit 327d364f34de730879f330ea51d8801f5c6bddc9.
Reverted due to the problem with reload signal sent by systemd.
`frostfs-ir` service reconfigures correctly and service's
statuses are being reported to systemd. However, since we
replaced `go:linkname` & `nanotime()` with `time.Since()`,
systemd refuses to accept reload signal response from
`frostfs-ir`. To maintain correct behaviour it was decided to
revert systemd-related changes until a better solution is
found.
Signed-off-by: Ekaterina Lebedeva
---
pkg/util/sdnotify/clock.go | 10 ++++++++++
pkg/util/sdnotify/clock.s | 2 ++
pkg/util/sdnotify/sdnotify.go | 4 +---
3 files changed, 13 insertions(+), 3 deletions(-)
create mode 100644 pkg/util/sdnotify/clock.go
create mode 100644 pkg/util/sdnotify/clock.s
diff --git a/pkg/util/sdnotify/clock.go b/pkg/util/sdnotify/clock.go
new file mode 100644
index 000000000..f5419d027
--- /dev/null
+++ b/pkg/util/sdnotify/clock.go
@@ -0,0 +1,10 @@
+package sdnotify
+
+import (
+ // For go:linkname to work.
+ _ "unsafe"
+)
+
+//go:noescape
+//go:linkname nanotime runtime.nanotime
+func nanotime() int64
diff --git a/pkg/util/sdnotify/clock.s b/pkg/util/sdnotify/clock.s
new file mode 100644
index 000000000..ad033ff4f
--- /dev/null
+++ b/pkg/util/sdnotify/clock.s
@@ -0,0 +1,2 @@
+// The file is intentionally empty.
+// It is a workaround for https://github.com/golang/go/issues/15006
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
index a3af50b22..16a3f11c1 100644
--- a/pkg/util/sdnotify/sdnotify.go
+++ b/pkg/util/sdnotify/sdnotify.go
@@ -6,7 +6,6 @@ import (
"net"
"os"
"strings"
- "time"
)
const (
@@ -17,7 +16,6 @@ const (
var (
socket *net.UnixAddr
- start = time.Now()
errSocketVariableIsNotPresent = errors.New("\"NOTIFY_SOCKET\" environment variable is not present")
errSocketIsNotInitialized = errors.New("socket is not initialized")
@@ -53,7 +51,7 @@ func FlagAndStatus(status string) error {
// must be sent, containing "READY=1".
//
// For MONOTONIC_USEC format refer to https://www.man7.org/linux/man-pages/man3/sd_notify.3.html
- status += fmt.Sprintf("\nMONOTONIC_USEC=%d", uint64(time.Since(start))/1e3 /* microseconds in nanoseconds */)
+ status += fmt.Sprintf("\nMONOTONIC_USEC=%d", uint64(nanotime())/1e3 /* microseconds in nanoseconds */)
}
status += "\nSTATUS=" + strings.TrimSuffix(status, "=1")
return Send(status)
From ec1509de4ebbccbade51115c434996cd1fd9be7e Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Thu, 15 Aug 2024 17:12:38 +0300
Subject: [PATCH 033/705] [#1262] sdnotify: Send system monotonic time on
reload
The synchronized service reload protocol added in systemd version 253
requires that the service provides a MONOTONIC_USEC field alongside the
RELOADING=1 notification message for synchronization purposes. The value
carried in this field must be the system CLOCK_MONOTONIC timestamp at
the time the notification message was generated as systemd compares it
to other CLOCK_MONOTONIC timestamps taken by pid1.
Signed-off-by: Ekaterina Lebedeva
---
pkg/util/sdnotify/clock.go | 10 ----------
pkg/util/sdnotify/clock.s | 2 --
pkg/util/sdnotify/sdnotify.go | 9 ++++++++-
3 files changed, 8 insertions(+), 13 deletions(-)
delete mode 100644 pkg/util/sdnotify/clock.go
delete mode 100644 pkg/util/sdnotify/clock.s
diff --git a/pkg/util/sdnotify/clock.go b/pkg/util/sdnotify/clock.go
deleted file mode 100644
index f5419d027..000000000
--- a/pkg/util/sdnotify/clock.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package sdnotify
-
-import (
- // For go:linkname to work.
- _ "unsafe"
-)
-
-//go:noescape
-//go:linkname nanotime runtime.nanotime
-func nanotime() int64
diff --git a/pkg/util/sdnotify/clock.s b/pkg/util/sdnotify/clock.s
deleted file mode 100644
index ad033ff4f..000000000
--- a/pkg/util/sdnotify/clock.s
+++ /dev/null
@@ -1,2 +0,0 @@
-// The file is intentionally empty.
-// It is a workaround for https://github.com/golang/go/issues/15006
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
index 16a3f11c1..5235315cc 100644
--- a/pkg/util/sdnotify/sdnotify.go
+++ b/pkg/util/sdnotify/sdnotify.go
@@ -5,7 +5,10 @@ import (
"fmt"
"net"
"os"
+ "strconv"
"strings"
+
+ "golang.org/x/sys/unix"
)
const (
@@ -51,7 +54,11 @@ func FlagAndStatus(status string) error {
// must be sent, containing "READY=1".
//
// For MONOTONIC_USEC format refer to https://www.man7.org/linux/man-pages/man3/sd_notify.3.html
- status += fmt.Sprintf("\nMONOTONIC_USEC=%d", uint64(nanotime())/1e3 /* microseconds in nanoseconds */)
+ var ts unix.Timespec
+ if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
+ return fmt.Errorf("clock_gettime: %w", err)
+ }
+ status += "\nMONOTONIC_USEC=" + strconv.FormatInt(ts.Nano()/1000, 10)
}
status += "\nSTATUS=" + strings.TrimSuffix(status, "=1")
return Send(status)
From a4a1c3f18ba75ba73de0fbc97bbccacf68625ee5 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Mon, 12 Aug 2024 13:01:57 +0300
Subject: [PATCH 034/705] [#1307] go.mod: Bump frostfs-sdk-go/frostfs-api-go/v2
versions
* Also, resolve dependencies and conflicts for object service
by creating stub for `Patch` method.
Signed-off-by: Airat Arifullin
---
cmd/frostfs-node/object.go | 18 ++++++-
go.mod | 6 +--
go.sum | 12 ++---
pkg/network/transport/object/grpc/service.go | 42 +++++++++++++++
pkg/services/object/acl/v2/service.go | 20 +++++++
pkg/services/object/ape/service.go | 20 +++++++
pkg/services/object/audit.go | 56 ++++++++++++++++++++
pkg/services/object/common.go | 8 +++
pkg/services/object/metrics.go | 37 +++++++++++++
pkg/services/object/patch/service.go | 22 ++++++++
pkg/services/object/patch/streamer.go | 28 ++++++++++
pkg/services/object/response.go | 34 ++++++++++++
pkg/services/object/server.go | 7 +++
pkg/services/object/sign.go | 42 +++++++++++++++
pkg/services/object/transport_splitter.go | 4 ++
15 files changed, 345 insertions(+), 11 deletions(-)
create mode 100644 pkg/services/object/patch/service.go
create mode 100644 pkg/services/object/patch/streamer.go
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 0124bf772..eef142415 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -28,6 +28,7 @@ import (
deletesvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete/v2"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
getsvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get/v2"
+ patchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/patch"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
putsvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put/v2"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
@@ -54,6 +55,8 @@ type objectSvc struct {
get *getsvcV2.Service
delete *deletesvcV2.Service
+
+ patch *patchsvc.Service
}
func (c *cfg) MaxObjectSize() uint64 {
@@ -71,6 +74,10 @@ func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
return s.put.Put()
}
+func (s *objectSvc) Patch() (objectService.PatchObjectstream, error) {
+ return s.patch.Patch()
+}
+
func (s *objectSvc) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
return s.put.PutSingle(ctx, req)
}
@@ -181,10 +188,12 @@ func initObjectService(c *cfg) {
sDeleteV2 := createDeleteServiceV2(sDelete)
+ sPatch := createPatchSvc(sGet, sPut, keyStorage)
+
// build service pipeline
// grpc | audit | | signature | response | acl | ape | split
- splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2)
+ splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
apeSvc := createAPEService(c, splitSvc)
@@ -353,6 +362,10 @@ func createPutSvcV2(sPut *putsvc.Service, keyStorage *util.KeyStorage) *putsvcV2
return putsvcV2.NewService(sPut, keyStorage)
}
+func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service, keyStorage *util.KeyStorage) *patchsvc.Service {
+ return patchsvc.NewService(keyStorage, sGet, sPut)
+}
+
func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
@@ -425,7 +438,7 @@ func createDeleteServiceV2(sDelete *deletesvc.Service) *deletesvcV2.Service {
}
func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Service,
- sSearchV2 *searchsvcV2.Service, sDeleteV2 *deletesvcV2.Service,
+ sSearchV2 *searchsvcV2.Service, sDeleteV2 *deletesvcV2.Service, sPatch *patchsvc.Service,
) *objectService.TransportSplitter {
return objectService.NewTransportSplitter(
c.cfgGRPC.maxChunkSize,
@@ -435,6 +448,7 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
search: sSearchV2,
get: sGetV2,
delete: sDeleteV2,
+ patch: sPatch,
},
)
}
diff --git a/go.mod b/go.mod
index 196b4d463..93ed7d750 100644
--- a/go.mod
+++ b/go.mod
@@ -4,14 +4,14 @@ go 1.21
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240726072425-3dfa2f4fd65e
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240813155151-d112a28d382f
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240726111349-9da46f566fec
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240813155821-98aabc45a720
git.frostfs.info/TrueCloudLab/hrw v1.2.1
- git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240712081403-2628f6184984
+ git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/cheggaaa/pb v1.0.29
diff --git a/go.sum b/go.sum
index bd6d85882..803a065c3 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240726072425-3dfa2f4fd65e h1:gEWT+70E/RvGkxtSv+PlyUN2vtJVymhQa1mypvrXukM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240726072425-3dfa2f4fd65e/go.mod h1:OBDSr+DqV1z4VDouoX3YMleNc4DPBVBWTG3WDT2PK1o=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240813155151-d112a28d382f h1:xrJqsXOZeSkBFMSyN+PQ9DiCGxVULU3VIN/tuH/vtb8=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240813155151-d112a28d382f/go.mod h1:mc7j6Cc1GU1tJZNmDwEYiJJ339biNnU1Bz3wZGogMe0=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@@ -10,14 +10,14 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240726111349-9da46f566fec h1:A09Swh7yogmmiABUf7Ht6MTQXJ07MyGx4+ziUQNelec=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240726111349-9da46f566fec/go.mod h1:DlJmgV4/qkFkx2ab+YWznlMijiF2yZHnrJswJOB7XGs=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240813155821-98aabc45a720 h1:15UXpW42bfshIv/X5kww92jG2o0drHgsdFd+UJ6zD7g=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240813155821-98aabc45a720/go.mod h1:XRX/bBQsDJKr040N/a0YnDhxJqaUv1XyMVj3qxnb5K0=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240712081403-2628f6184984 h1:O3F2Grz07RWZ68mRz1xsYsNPNvZLwY00BM+xoYb1kNk=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240712081403-2628f6184984/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go
index 7c6b395d5..d55e3d87f 100644
--- a/pkg/network/transport/object/grpc/service.go
+++ b/pkg/network/transport/object/grpc/service.go
@@ -24,6 +24,48 @@ func New(c objectSvc.ServiceServer) *Server {
}
}
+// Patch opens internal Object patch stream and feeds it by the data read from gRPC stream.
+func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error {
+ stream, err := s.srv.Patch()
+ if err != nil {
+ return err
+ }
+
+ for {
+ req, err := gStream.Recv()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ resp, err := stream.CloseAndRecv(gStream.Context())
+ if err != nil {
+ return err
+ }
+
+ return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PatchResponse))
+ }
+
+ return err
+ }
+
+ patchReq := new(object.PatchRequest)
+ if err := patchReq.FromGRPCMessage(req); err != nil {
+ return err
+ }
+
+ if err := stream.Send(gStream.Context(), patchReq); err != nil {
+ if errors.Is(err, util.ErrAbortStream) {
+ resp, err := stream.CloseAndRecv(gStream.Context())
+ if err != nil {
+ return err
+ }
+
+ return gStream.SendAndClose(resp.ToGRPCMessage().(*objectGRPC.PatchResponse))
+ }
+
+ return err
+ }
+ }
+}
+
// Put opens internal Object service Put stream and overtakes data from gRPC stream to it.
func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
stream, err := s.srv.Put()
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
index 3e128836f..58557d611 100644
--- a/pkg/services/object/acl/v2/service.go
+++ b/pkg/services/object/acl/v2/service.go
@@ -249,6 +249,26 @@ func (b Service) Put() (object.PutObjectStream, error) {
}, err
}
+type patchStreamBasicChecker struct {
+ next object.PatchObjectstream
+}
+
+func (p patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
+ return p.next.Send(ctx, request)
+}
+
+func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+ return p.next.CloseAndRecv(ctx)
+}
+
+func (b Service) Patch() (object.PatchObjectstream, error) {
+ streamer, err := b.next.Patch()
+
+ return &patchStreamBasicChecker{
+ next: streamer,
+ }, err
+}
+
func (b Service) Head(
ctx context.Context,
request *objectV2.HeadRequest,
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
index 2adb1b736..f005d0873 100644
--- a/pkg/services/object/ape/service.go
+++ b/pkg/services/object/ape/service.go
@@ -204,6 +204,26 @@ func (c *Service) Put() (objectSvc.PutObjectStream, error) {
}, err
}
+type patchStreamBasicChecker struct {
+ next objectSvc.PatchObjectstream
+}
+
+func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
+ return p.next.Send(ctx, request)
+}
+
+func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+ return p.next.CloseAndRecv(ctx)
+}
+
+func (c *Service) Patch() (objectSvc.PatchObjectstream, error) {
+ streamer, err := c.next.Patch()
+
+ return &patchStreamBasicChecker{
+ next: streamer,
+ }, err
+}
+
func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go
index 1305fa008..680a96c40 100644
--- a/pkg/services/object/audit.go
+++ b/pkg/services/object/audit.go
@@ -170,3 +170,59 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error
}
return err
}
+
+type auditPatchStream struct {
+ stream PatchObjectstream
+ log *logger.Logger
+
+ failed bool
+ key []byte
+ containerID *refs.ContainerID
+ objectID *refs.ObjectID
+}
+
+func (a *auditService) Patch() (PatchObjectstream, error) {
+ res, err := a.next.Patch()
+ if !a.enabled.Load() {
+ return res, err
+ }
+ if err != nil {
+ audit.LogRequest(a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false)
+ return res, err
+ }
+ return &auditPatchStream{
+ stream: res,
+ log: a.log,
+ }, nil
+}
+
+// CloseAndRecv implements PutObjectStream.
+func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
+ resp, err := a.stream.CloseAndRecv(ctx)
+ if err != nil {
+ a.failed = true
+ }
+ a.objectID = resp.GetBody().ObjectID
+ audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
+ !a.failed)
+ return resp, err
+}
+
+// Send implements PutObjectStream.
+func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) error {
+ a.containerID = req.GetBody().GetAddress().GetContainerID()
+ a.objectID = req.GetBody().GetAddress().GetObjectID()
+ a.key = req.GetVerificationHeader().GetBodySignature().GetKey()
+
+ err := a.stream.Send(ctx, req)
+ if err != nil {
+ a.failed = true
+ }
+ if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here
+ audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
+ audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
+ !a.failed)
+ }
+ return err
+}
diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go
index 73ee9f81b..841a3d021 100644
--- a/pkg/services/object/common.go
+++ b/pkg/services/object/common.go
@@ -48,6 +48,14 @@ func (x *Common) Put() (PutObjectStream, error) {
return x.nextHandler.Put()
}
+func (x *Common) Patch() (PatchObjectstream, error) {
+ if x.state.IsMaintenance() {
+ return nil, new(apistatus.NodeUnderMaintenance)
+ }
+
+ return x.nextHandler.Patch()
+}
+
func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
if x.state.IsMaintenance() {
return nil, new(apistatus.NodeUnderMaintenance)
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index f972f43ae..b64f879ac 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -27,6 +27,12 @@ type (
start time.Time
}
+ patchStreamMetric struct {
+ stream PatchObjectstream
+ metrics MetricRegister
+ start time.Time
+ }
+
MetricRegister interface {
AddRequestDuration(string, time.Duration, bool)
AddPayloadSize(string, int)
@@ -76,6 +82,24 @@ func (m MetricCollector) Put() (PutObjectStream, error) {
return m.next.Put()
}
+func (m MetricCollector) Patch() (PatchObjectstream, error) {
+ if m.enabled {
+ t := time.Now()
+
+ stream, err := m.next.Patch()
+ if err != nil {
+ return nil, err
+ }
+
+ return &patchStreamMetric{
+ stream: stream,
+ metrics: m.metrics,
+ start: t,
+ }, nil
+ }
+ return m.next.Patch()
+}
+
func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingleRequest) (*object.PutSingleResponse, error) {
if m.enabled {
t := time.Now()
@@ -189,3 +213,16 @@ func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse,
return res, err
}
+func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) error {
+ s.metrics.AddPayloadSize("Patch", len(req.GetBody().GetPatch().Chunk))
+
+ return s.stream.Send(ctx, req)
+}
+
+func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
+ res, err := s.stream.CloseAndRecv(ctx)
+
+ s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil)
+
+ return res, err
+}
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
new file mode 100644
index 000000000..df6926e84
--- /dev/null
+++ b/pkg/services/object/patch/service.go
@@ -0,0 +1,22 @@
+package patchsvc
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+ putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+)
+
+// Service implements Put operation of Object service v2.
+type Service struct {
+}
+
+// NewService constructs Service instance from provided options.
+func NewService(_ *util.KeyStorage, _ *getsvc.Service, _ *putsvc.Service) *Service {
+ return &Service{}
+}
+
+// Put calls internal service and returns v2 object streamer.
+func (s *Service) Patch() (object.PatchObjectstream, error) {
+ return &Streamer{}, nil
+}
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
new file mode 100644
index 000000000..5d021b9c3
--- /dev/null
+++ b/pkg/services/object/patch/streamer.go
@@ -0,0 +1,28 @@
+package patchsvc
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+)
+
+// Streamer for the patch handler is a pipeline that merges two incoming
+// streams of patches and original object payload chunks.
+// The merged result is fed to Put stream target.
+type Streamer struct{}
+
+func (s *Streamer) Send(ctx context.Context, _ *object.PatchRequest) error {
+ _, span := tracing.StartSpanFromContext(ctx, "patch.streamer.Send")
+ defer span.End()
+
+ return nil
+}
+
+func (s *Streamer) CloseAndRecv(_ context.Context) (*object.PatchResponse, error) {
+ return &object.PatchResponse{
+ Body: &object.PatchResponseBody{
+ ObjectID: nil,
+ },
+ }, nil
+}
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
index a10f26a34..c85259c1f 100644
--- a/pkg/services/object/response.go
+++ b/pkg/services/object/response.go
@@ -37,6 +37,11 @@ type putStreamResponser struct {
respSvc *response.Service
}
+type patchStreamResponser struct {
+ stream PatchObjectstream
+ respSvc *response.Service
+}
+
// NewResponseService returns object service instance that passes internal service
// call to response service.
func NewResponseService(objSvc ServiceServer, respSvc *response.Service) *ResponseService {
@@ -87,6 +92,35 @@ func (s *ResponseService) Put() (PutObjectStream, error) {
}, nil
}
+func (s *patchStreamResponser) Send(ctx context.Context, req *object.PatchRequest) error {
+ if err := s.stream.Send(ctx, req); err != nil {
+ return fmt.Errorf("could not send the request: %w", err)
+ }
+ return nil
+}
+
+func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
+ r, err := s.stream.CloseAndRecv(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("could not close stream and receive response: %w", err)
+ }
+
+ s.respSvc.SetMeta(r)
+ return r, nil
+}
+
+func (s *ResponseService) Patch() (PatchObjectstream, error) {
+ stream, err := s.svc.Patch()
+ if err != nil {
+ return nil, fmt.Errorf("could not create Put object streamer: %w", err)
+ }
+
+ return &patchStreamResponser{
+ stream: stream,
+ respSvc: s.respSvc,
+ }, nil
+}
+
func (s *ResponseService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) {
resp, err := s.svc.PutSingle(ctx, req)
if err != nil {
diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go
index 73b88f233..c1b036ab3 100644
--- a/pkg/services/object/server.go
+++ b/pkg/services/object/server.go
@@ -31,11 +31,18 @@ type PutObjectStream interface {
CloseAndRecv(context.Context) (*object.PutResponse, error)
}
+// PatchObjectstream is an interface of FrostFS API v2 compatible patch streamer.
+type PatchObjectstream interface {
+ Send(context.Context, *object.PatchRequest) error
+ CloseAndRecv(context.Context) (*object.PatchResponse, error)
+}
+
// ServiceServer is an interface of utility
// serving v2 Object service.
type ServiceServer interface {
Get(*object.GetRequest, GetObjectStream) error
Put() (PutObjectStream, error)
+ Patch() (PatchObjectstream, error)
Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error)
Search(*object.SearchRequest, SearchStream) error
Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index 4bf581b78..631c539af 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -35,6 +35,12 @@ type putStreamSigner struct {
err error
}
+type patchStreamSigner struct {
+ sigSvc *util.SignService
+ stream PatchObjectstream
+ err error
+}
+
type getRangeStreamSigner struct {
GetObjectRangeStream
sigSvc *util.SignService
@@ -112,6 +118,42 @@ func (s *SignService) Put() (PutObjectStream, error) {
}, nil
}
+func (s *patchStreamSigner) Send(ctx context.Context, req *object.PatchRequest) error {
+ if s.err = s.sigSvc.VerifyRequest(req); s.err != nil {
+ return util.ErrAbortStream
+ }
+ if s.err = s.stream.Send(ctx, req); s.err != nil {
+ return util.ErrAbortStream
+ }
+ return nil
+}
+
+func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PatchResponse, err error) {
+ if s.err != nil {
+ err = s.err
+ resp = new(object.PatchResponse)
+ } else {
+ resp, err = s.stream.CloseAndRecv(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("could not close stream and receive response: %w", err)
+ }
+ }
+
+ return resp, s.sigSvc.SignResponse(resp, err)
+}
+
+func (s *SignService) Patch() (PatchObjectstream, error) {
+ stream, err := s.svc.Patch()
+ if err != nil {
+ return nil, fmt.Errorf("could not create Put object streamer: %w", err)
+ }
+
+ return &patchStreamSigner{
+ stream: stream,
+ sigSvc: s.sigSvc,
+ }, nil
+}
+
func (s *SignService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
if err := s.sigSvc.VerifyRequest(req); err != nil {
resp := new(object.HeadResponse)
diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go
index 54e49cb12..5acfac06b 100644
--- a/pkg/services/object/transport_splitter.go
+++ b/pkg/services/object/transport_splitter.go
@@ -91,6 +91,10 @@ func (c TransportSplitter) Put() (PutObjectStream, error) {
return c.next.Put()
}
+func (c TransportSplitter) Patch() (PatchObjectstream, error) {
+ return c.next.Patch()
+}
+
func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
return c.next.Head(ctx, request)
}
From e890f1b4b17d62cd95fe22d018212f91ff2be95d Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Mon, 12 Aug 2024 17:11:10 +0300
Subject: [PATCH 035/705] [#1307] object: Implement `Patch` method
Signed-off-by: Airat Arifullin
---
cmd/frostfs-node/object.go | 2 +-
pkg/services/object/acl/v2/service.go | 20 +-
pkg/services/object/acl/v2/util.go | 8 +-
pkg/services/object/ape/checker_test.go | 19 +-
pkg/services/object/ape/service.go | 20 +-
pkg/services/object/audit.go | 19 +-
pkg/services/object/common.go | 2 +-
pkg/services/object/get/prm.go | 4 +
pkg/services/object/metrics.go | 6 +-
pkg/services/object/patch/range_provider.go | 63 ++++++
pkg/services/object/patch/service.go | 30 ++-
pkg/services/object/patch/streamer.go | 215 +++++++++++++++++++-
pkg/services/object/patch/util.go | 53 +++++
pkg/services/object/put/prm.go | 11 +
pkg/services/object/put/streamer.go | 23 ++-
pkg/services/object/response.go | 4 +-
pkg/services/object/server.go | 6 +-
pkg/services/object/sign.go | 4 +-
pkg/services/object/transport_splitter.go | 2 +-
19 files changed, 430 insertions(+), 81 deletions(-)
create mode 100644 pkg/services/object/patch/range_provider.go
create mode 100644 pkg/services/object/patch/util.go
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index eef142415..467c5901b 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -74,7 +74,7 @@ func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
return s.put.Put()
}
-func (s *objectSvc) Patch() (objectService.PatchObjectstream, error) {
+func (s *objectSvc) Patch() (objectService.PatchObjectStream, error) {
return s.patch.Patch()
}
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
index 58557d611..a9ddad7ca 100644
--- a/pkg/services/object/acl/v2/service.go
+++ b/pkg/services/object/acl/v2/service.go
@@ -249,24 +249,8 @@ func (b Service) Put() (object.PutObjectStream, error) {
}, err
}
-type patchStreamBasicChecker struct {
- next object.PatchObjectstream
-}
-
-func (p patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
- return p.next.Send(ctx, request)
-}
-
-func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
- return p.next.CloseAndRecv(ctx)
-}
-
-func (b Service) Patch() (object.PatchObjectstream, error) {
- streamer, err := b.next.Patch()
-
- return &patchStreamBasicChecker{
- next: streamer,
- }, err
+func (b Service) Patch() (object.PatchObjectStream, error) {
+ return b.next.Patch()
}
func (b Service) Head(
diff --git a/pkg/services/object/acl/v2/util.go b/pkg/services/object/acl/v2/util.go
index feda6a3cf..76fd9651d 100644
--- a/pkg/services/object/acl/v2/util.go
+++ b/pkg/services/object/acl/v2/util.go
@@ -174,7 +174,7 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
switch op {
case acl.OpObjectPut:
- return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete)
+ return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch)
case acl.OpObjectDelete:
return tok.AssertVerb(sessionSDK.VerbObjectDelete)
case acl.OpObjectGet:
@@ -185,11 +185,13 @@ func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
sessionSDK.VerbObjectGet,
sessionSDK.VerbObjectDelete,
sessionSDK.VerbObjectRange,
- sessionSDK.VerbObjectRangeHash)
+ sessionSDK.VerbObjectRangeHash,
+ sessionSDK.VerbObjectPatch,
+ )
case acl.OpObjectSearch:
return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete)
case acl.OpObjectRange:
- return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash)
+ return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch)
case acl.OpObjectHash:
return tok.AssertVerb(sessionSDK.VerbObjectRangeHash)
}
diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go
index 090f6a83c..afe19fc51 100644
--- a/pkg/services/object/ape/checker_test.go
+++ b/pkg/services/object/ape/checker_test.go
@@ -518,7 +518,22 @@ func TestAPECheck_BearerTokenOverrides(t *testing.T) {
ls := inmemory.NewInmemoryLocalStorage()
ms := inmemory.NewInmemoryMorphRuleChainStorage()
- checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nil, &stMock{}, nil, nil)
+ node1Key, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+ node1 := netmapSDK.NodeInfo{}
+ node1.SetPublicKey(node1Key.PublicKey().Bytes())
+ netmap := &netmapSDK.NetMap{}
+ netmap.SetEpoch(100)
+ netmap.SetNodes([]netmapSDK.NodeInfo{node1})
+
+ nm := &netmapStub{
+ currentEpoch: 100,
+ netmaps: map[uint64]*netmapSDK.NetMap{
+ 100: netmap,
+ },
+ }
+
+ checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, nil, nil)
prm := Prm{
Method: method,
@@ -541,7 +556,7 @@ func TestAPECheck_BearerTokenOverrides(t *testing.T) {
}
}
- err := checker.CheckAPE(context.Background(), prm)
+ err = checker.CheckAPE(context.Background(), prm)
if test.expectAPEErr {
require.Error(t, err)
} else {
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
index f005d0873..64dd19c24 100644
--- a/pkg/services/object/ape/service.go
+++ b/pkg/services/object/ape/service.go
@@ -204,24 +204,8 @@ func (c *Service) Put() (objectSvc.PutObjectStream, error) {
}, err
}
-type patchStreamBasicChecker struct {
- next objectSvc.PatchObjectstream
-}
-
-func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
- return p.next.Send(ctx, request)
-}
-
-func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
- return p.next.CloseAndRecv(ctx)
-}
-
-func (c *Service) Patch() (objectSvc.PatchObjectstream, error) {
- streamer, err := c.next.Patch()
-
- return &patchStreamBasicChecker{
- next: streamer,
- }, err
+func (c *Service) Patch() (objectSvc.PatchObjectStream, error) {
+ return c.next.Patch()
}
func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go
index 680a96c40..b924386d1 100644
--- a/pkg/services/object/audit.go
+++ b/pkg/services/object/audit.go
@@ -172,16 +172,18 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error
}
type auditPatchStream struct {
- stream PatchObjectstream
+ stream PatchObjectStream
log *logger.Logger
failed bool
key []byte
containerID *refs.ContainerID
objectID *refs.ObjectID
+
+ nonFirstSend bool
}
-func (a *auditService) Patch() (PatchObjectstream, error) {
+func (a *auditService) Patch() (PatchObjectStream, error) {
res, err := a.next.Patch()
if !a.enabled.Load() {
return res, err
@@ -196,7 +198,7 @@ func (a *auditService) Patch() (PatchObjectstream, error) {
}, nil
}
-// CloseAndRecv implements PutObjectStream.
+// CloseAndRecv implements PatchObjectStream.
func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) {
resp, err := a.stream.CloseAndRecv(ctx)
if err != nil {
@@ -209,11 +211,14 @@ func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchRespo
return resp, err
}
-// Send implements PutObjectStream.
+// Send implements PatchObjectStream.
func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) error {
- a.containerID = req.GetBody().GetAddress().GetContainerID()
- a.objectID = req.GetBody().GetAddress().GetObjectID()
- a.key = req.GetVerificationHeader().GetBodySignature().GetKey()
+ if !a.nonFirstSend {
+ a.containerID = req.GetBody().GetAddress().GetContainerID()
+ a.objectID = req.GetBody().GetAddress().GetObjectID()
+ a.key = req.GetVerificationHeader().GetBodySignature().GetKey()
+ a.nonFirstSend = true
+ }
err := a.stream.Send(ctx, req)
if err != nil {
diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go
index 841a3d021..f48cc5b3d 100644
--- a/pkg/services/object/common.go
+++ b/pkg/services/object/common.go
@@ -48,7 +48,7 @@ func (x *Common) Put() (PutObjectStream, error) {
return x.nextHandler.Put()
}
-func (x *Common) Patch() (PatchObjectstream, error) {
+func (x *Common) Patch() (PatchObjectStream, error) {
if x.state.IsMaintenance() {
return nil, new(apistatus.NodeUnderMaintenance)
}
diff --git a/pkg/services/object/get/prm.go b/pkg/services/object/get/prm.go
index cbdb7a3e2..94c07381c 100644
--- a/pkg/services/object/get/prm.go
+++ b/pkg/services/object/get/prm.go
@@ -124,6 +124,10 @@ func (p *commonPrm) SetRequestForwarder(f RequestForwarder) {
p.forwarder = f
}
+func (p *commonPrm) SetSignerKey(signerKey *ecdsa.PrivateKey) {
+ p.signerKey = signerKey
+}
+
// WithAddress sets object address to be read.
func (p *commonPrm) WithAddress(addr oid.Address) {
p.addr = addr
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index b64f879ac..e53b7584f 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -28,7 +28,7 @@ type (
}
patchStreamMetric struct {
- stream PatchObjectstream
+ stream PatchObjectStream
metrics MetricRegister
start time.Time
}
@@ -82,7 +82,7 @@ func (m MetricCollector) Put() (PutObjectStream, error) {
return m.next.Put()
}
-func (m MetricCollector) Patch() (PatchObjectstream, error) {
+func (m MetricCollector) Patch() (PatchObjectStream, error) {
if m.enabled {
t := time.Now()
@@ -214,7 +214,7 @@ func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse,
return res, err
}
func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) error {
- s.metrics.AddPayloadSize("Patch", len(req.GetBody().GetPatch().Chunk))
+ s.metrics.AddPayloadSize("Patch", len(req.GetBody().GetPatch().GetChunk()))
return s.stream.Send(ctx, req)
}
diff --git a/pkg/services/object/patch/range_provider.go b/pkg/services/object/patch/range_provider.go
new file mode 100644
index 000000000..755c5bf60
--- /dev/null
+++ b/pkg/services/object/patch/range_provider.go
@@ -0,0 +1,63 @@
+package patchsvc
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "io"
+
+ getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+ objectUtil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ patcherSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher"
+)
+
+func (p *pipeChunkWriter) WriteChunk(_ context.Context, chunk []byte) error {
+ _, err := p.wr.Write(chunk)
+ return err
+}
+
+type rangeProvider struct {
+ getSvc *getsvc.Service
+
+ addr oid.Address
+
+ commonPrm *objectUtil.CommonPrm
+
+ localNodeKey *ecdsa.PrivateKey
+}
+
+var _ patcherSDK.RangeProvider = (*rangeProvider)(nil)
+
+func (r *rangeProvider) GetRange(ctx context.Context, rng *objectSDK.Range) io.Reader {
+ pipeReader, pipeWriter := io.Pipe()
+
+ var rngPrm getsvc.RangePrm
+ rngPrm.SetSignerKey(r.localNodeKey)
+ rngPrm.SetCommonParameters(r.commonPrm)
+
+ rngPrm.WithAddress(r.addr)
+ rngPrm.SetChunkWriter(&pipeChunkWriter{
+ wr: pipeWriter,
+ })
+ rngPrm.SetRange(rng)
+
+ getRangeErr := make(chan error)
+
+ go func() {
+ defer pipeWriter.Close()
+
+ select {
+ case <-ctx.Done():
+ pipeWriter.CloseWithError(ctx.Err())
+ case err := <-getRangeErr:
+ pipeWriter.CloseWithError(err)
+ }
+ }()
+
+ go func() {
+ getRangeErr <- r.getSvc.GetRange(ctx, rngPrm)
+ }()
+
+ return pipeReader
+}
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
index df6926e84..c4ab15abf 100644
--- a/pkg/services/object/patch/service.go
+++ b/pkg/services/object/patch/service.go
@@ -9,14 +9,36 @@ import (
// Service implements Put operation of Object service v2.
type Service struct {
+ keyStorage *util.KeyStorage
+
+ getSvc *getsvc.Service
+
+ putSvc *putsvc.Service
}
// NewService constructs Service instance from provided options.
-func NewService(_ *util.KeyStorage, _ *getsvc.Service, _ *putsvc.Service) *Service {
- return &Service{}
+func NewService(ks *util.KeyStorage, getSvc *getsvc.Service, putSvc *putsvc.Service) *Service {
+ return &Service{
+ keyStorage: ks,
+
+ getSvc: getSvc,
+
+ putSvc: putSvc,
+ }
}
// Put calls internal service and returns v2 object streamer.
-func (s *Service) Patch() (object.PatchObjectstream, error) {
- return &Streamer{}, nil
+func (s *Service) Patch() (object.PatchObjectStream, error) {
+ nodeKey, err := s.keyStorage.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Streamer{
+ getSvc: s.getSvc,
+
+ putSvc: s.putSvc,
+
+ localNodeKey: nodeKey,
+ }, nil
}
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index 5d021b9c3..84363530e 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -2,27 +2,220 @@ package patchsvc
import (
"context"
+ "crypto/ecdsa"
+ "errors"
+ "fmt"
+ "io"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+ putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher"
)
-// Streamer for the patch handler is a pipeline that merges two incoming
-// streams of patches and original object payload chunks.
-// The merged result is fed to Put stream target.
-type Streamer struct{}
+// Streamer for the patch handler is a pipeline that merges two incoming streams of patches
+// and original object payload chunks. The merged result is fed to Put stream target.
+type Streamer struct {
+ // Patcher must be initialized at first Streamer.Send call.
+ patcher patcher.PatchApplier
-func (s *Streamer) Send(ctx context.Context, _ *object.PatchRequest) error {
- _, span := tracing.StartSpanFromContext(ctx, "patch.streamer.Send")
+ nonFirstSend bool
+
+ getSvc *getsvc.Service
+
+ putSvc *putsvc.Service
+
+ localNodeKey *ecdsa.PrivateKey
+}
+
+type pipeChunkWriter struct {
+ wr *io.PipeWriter
+}
+
+type headResponseWriter struct {
+ body *objectV2.HeadResponseBody
+}
+
+func (w *headResponseWriter) WriteHeader(_ context.Context, hdr *objectSDK.Object) error {
+ w.body.SetHeaderPart(toFullObjectHeader(hdr))
+ return nil
+}
+
+func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart {
+ obj := hdr.ToV2()
+
+ hs := new(objectV2.HeaderWithSignature)
+ hs.SetHeader(obj.GetHeader())
+ hs.SetSignature(obj.GetSignature())
+
+ return hs
+}
+
+func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
+ hdrWithSig, addr, err := s.readHeader(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ commonPrm, err := util.CommonPrmFromV2(req)
+ if err != nil {
+ return err
+ }
+ commonPrm.WithLocalOnly(false)
+
+ rangeProvider := &rangeProvider{
+ getSvc: s.getSvc,
+
+ addr: addr,
+
+ commonPrm: commonPrm,
+
+ localNodeKey: s.localNodeKey,
+ }
+
+ putstm, err := s.putSvc.Put()
+ if err != nil {
+ return err
+ }
+
+ hdr := hdrWithSig.GetHeader()
+ oV2 := new(objectV2.Object)
+ hV2 := new(objectV2.Header)
+ oV2.SetHeader(hV2)
+ oV2.GetHeader().SetContainerID(hdr.GetContainerID())
+ oV2.GetHeader().SetPayloadLength(hdr.GetPayloadLength())
+ oV2.GetHeader().SetAttributes(hdr.GetAttributes())
+
+ ownerID, err := newOwnerID(req.GetVerificationHeader())
+ if err != nil {
+ return err
+ }
+ oV2.GetHeader().SetOwnerID(ownerID)
+
+ prm, err := s.putInitPrm(req, oV2)
+ if err != nil {
+ return err
+ }
+
+ err = putstm.Init(ctx, prm)
+ if err != nil {
+ return err
+ }
+
+ patcherPrm := patcher.Params{
+ Header: objectSDK.NewFromV2(oV2),
+
+ RangeProvider: rangeProvider,
+
+ ObjectWriter: putstm.Target(),
+ }
+
+ s.patcher = patcher.New(patcherPrm)
+ return nil
+}
+
+func (s *Streamer) readHeader(ctx context.Context, req *objectV2.PatchRequest) (hdrWithSig *objectV2.HeaderWithSignature, addr oid.Address, err error) {
+ addrV2 := req.GetBody().GetAddress()
+ if addrV2 == nil {
+ err = errors.New("patch request has nil-address")
+ return
+ }
+
+ if err = addr.ReadFromV2(*addrV2); err != nil {
+ err = fmt.Errorf("read address error: %w", err)
+ return
+ }
+
+ commonPrm, err := util.CommonPrmFromV2(req)
+ if err != nil {
+ return
+ }
+ commonPrm.WithLocalOnly(false)
+
+ var p getsvc.HeadPrm
+ p.SetSignerKey(s.localNodeKey)
+ p.SetCommonParameters(commonPrm)
+
+ resp := new(objectV2.HeadResponse)
+ resp.SetBody(new(objectV2.HeadResponseBody))
+
+ p.WithAddress(addr)
+ p.SetHeaderWriter(&headResponseWriter{
+ body: resp.GetBody(),
+ })
+
+ err = s.getSvc.Head(ctx, p)
+ if err != nil {
+ err = fmt.Errorf("get header error: %w", err)
+ return
+ }
+
+ var ok bool
+ hdrPart := resp.GetBody().GetHeaderPart()
+ if hdrWithSig, ok = hdrPart.(*objectV2.HeaderWithSignature); !ok {
+ err = fmt.Errorf("unexpected header type: %T", hdrPart)
+ }
+ return
+}
+
+func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "patch.streamer.Send")
defer span.End()
+ defer func() {
+ s.nonFirstSend = true
+ }()
+
+ if !s.nonFirstSend {
+ if err := s.init(ctx, req); err != nil {
+ return fmt.Errorf("streamer init error: %w", err)
+ }
+ }
+
+ patch := new(objectSDK.Patch)
+ patch.FromV2(req.GetBody())
+
+ if !s.nonFirstSend {
+ err := s.patcher.ApplyAttributesPatch(ctx, patch.NewAttributes, patch.ReplaceAttributes)
+ if err != nil {
+ return fmt.Errorf("patch attributes: %w", err)
+ }
+ }
+
+ if patch.PayloadPatch != nil {
+ err := s.patcher.ApplyPayloadPatch(ctx, patch.PayloadPatch)
+ if err != nil {
+ return fmt.Errorf("patch payload: %w", err)
+ }
+ } else if s.nonFirstSend {
+ return errors.New("invalid non-first patch: empty payload")
+ }
+
return nil
}
-func (s *Streamer) CloseAndRecv(_ context.Context) (*object.PatchResponse, error) {
- return &object.PatchResponse{
- Body: &object.PatchResponseBody{
- ObjectID: nil,
+func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+ patcherResp, err := s.patcher.Close(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ oidV2 := new(refsV2.ObjectID)
+
+ if patcherResp.AccessIdentifiers.ParentID != nil {
+ patcherResp.AccessIdentifiers.ParentID.WriteToV2(oidV2)
+ } else {
+ patcherResp.AccessIdentifiers.SelfID.WriteToV2(oidV2)
+ }
+
+ return &objectV2.PatchResponse{
+ Body: &objectV2.PatchResponseBody{
+ ObjectID: oidV2,
},
}, nil
}
diff --git a/pkg/services/object/patch/util.go b/pkg/services/object/patch/util.go
new file mode 100644
index 000000000..1218d6694
--- /dev/null
+++ b/pkg/services/object/patch/util.go
@@ -0,0 +1,53 @@
+package patchsvc
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "errors"
+ "fmt"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+// putInitPrm initializes put parameters for the Put stream.
+func (s *Streamer) putInitPrm(req *objectV2.PatchRequest, obj *objectV2.Object) (*putsvc.PutInitPrm, error) {
+ commonPrm, err := util.CommonPrmFromV2(req)
+ if err != nil {
+ return nil, err
+ }
+
+ prm := new(putsvc.PutInitPrm)
+ prm.WithObject(objectSDK.NewFromV2(obj)).
+ WithCommonPrm(commonPrm).
+ WithPrivateKey(s.localNodeKey)
+
+ return prm, nil
+}
+
+func newOwnerID(vh *session.RequestVerificationHeader) (*refs.OwnerID, error) {
+ for vh.GetOrigin() != nil {
+ vh = vh.GetOrigin()
+ }
+ sig := vh.GetBodySignature()
+ if sig == nil {
+ return nil, errors.New("empty body signature")
+ }
+ key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("invalid signature key: %w", err)
+ }
+
+ var userID user.ID
+ user.IDFromKey(&userID, (ecdsa.PublicKey)(*key))
+ ownID := new(refs.OwnerID)
+ userID.WriteToV2(ownID)
+
+ return ownID, nil
+}
diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go
index 52a7c102c..0c8f12b45 100644
--- a/pkg/services/object/put/prm.go
+++ b/pkg/services/object/put/prm.go
@@ -2,6 +2,7 @@ package putsvc
import (
"context"
+ "crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -20,6 +21,8 @@ type PutInitPrm struct {
traverseOpts []placement.Option
relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
+
+ privateKey *ecdsa.PrivateKey
}
type PutChunkPrm struct {
@@ -65,3 +68,11 @@ func (p *PutChunkPrm) WithChunk(v []byte) *PutChunkPrm {
return p
}
+
+func (p *PutInitPrm) WithPrivateKey(v *ecdsa.PrivateKey) *PutInitPrm {
+ if p != nil {
+ p.privateKey = v
+ }
+
+ return p
+}
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 4e655ed54..969c8fa19 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -47,6 +47,11 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
return nil
}
+// Target accesses underlying target chunked object writer.
+func (p *Streamer) Target() transformer.ChunkedObjectWriter {
+ return p.target
+}
+
// MaxObjectSize returns maximum payload size for the streaming session.
//
// Must be called after the successful Init.
@@ -79,11 +84,15 @@ func (p *Streamer) initTarget(prm *PutInitPrm) error {
func (p *Streamer) initUntrustedTarget(prm *PutInitPrm) error {
p.relay = prm.relay
- nodeKey, err := p.cfg.keyStorage.GetKey(nil)
- if err != nil {
- return err
+ if prm.privateKey != nil {
+ p.privateKey = prm.privateKey
+ } else {
+ nodeKey, err := p.cfg.keyStorage.GetKey(nil)
+ if err != nil {
+ return err
+ }
+ p.privateKey = nodeKey
}
- p.privateKey = nodeKey
// prepare untrusted-Put object target
p.target = &validatingPreparedTarget{
@@ -136,7 +145,11 @@ func (p *Streamer) initTrustedTarget(prm *PutInitPrm) error {
}
}
- p.privateKey = key
+ if prm.privateKey != nil {
+ p.privateKey = prm.privateKey
+ } else {
+ p.privateKey = key
+ }
p.target = &validatingTarget{
fmt: p.fmtValidator,
nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
index c85259c1f..d7ba9f843 100644
--- a/pkg/services/object/response.go
+++ b/pkg/services/object/response.go
@@ -38,7 +38,7 @@ type putStreamResponser struct {
}
type patchStreamResponser struct {
- stream PatchObjectstream
+ stream PatchObjectStream
respSvc *response.Service
}
@@ -109,7 +109,7 @@ func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchR
return r, nil
}
-func (s *ResponseService) Patch() (PatchObjectstream, error) {
+func (s *ResponseService) Patch() (PatchObjectStream, error) {
stream, err := s.svc.Patch()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go
index c1b036ab3..da98ce245 100644
--- a/pkg/services/object/server.go
+++ b/pkg/services/object/server.go
@@ -31,8 +31,8 @@ type PutObjectStream interface {
CloseAndRecv(context.Context) (*object.PutResponse, error)
}
-// PatchObjectstream is an interface of FrostFS API v2 compatible patch streamer.
-type PatchObjectstream interface {
+// PatchObjectStream is an interface of FrostFS API v2 compatible patch streamer.
+type PatchObjectStream interface {
Send(context.Context, *object.PatchRequest) error
CloseAndRecv(context.Context) (*object.PatchResponse, error)
}
@@ -42,7 +42,7 @@ type PatchObjectstream interface {
type ServiceServer interface {
Get(*object.GetRequest, GetObjectStream) error
Put() (PutObjectStream, error)
- Patch() (PatchObjectstream, error)
+ Patch() (PatchObjectStream, error)
Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error)
Search(*object.SearchRequest, SearchStream) error
Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index 631c539af..35367aafe 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -37,7 +37,7 @@ type putStreamSigner struct {
type patchStreamSigner struct {
sigSvc *util.SignService
- stream PatchObjectstream
+ stream PatchObjectStream
err error
}
@@ -142,7 +142,7 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc
return resp, s.sigSvc.SignResponse(resp, err)
}
-func (s *SignService) Patch() (PatchObjectstream, error) {
+func (s *SignService) Patch() (PatchObjectStream, error) {
stream, err := s.svc.Patch()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go
index 5acfac06b..e560d6d8c 100644
--- a/pkg/services/object/transport_splitter.go
+++ b/pkg/services/object/transport_splitter.go
@@ -91,7 +91,7 @@ func (c TransportSplitter) Put() (PutObjectStream, error) {
return c.next.Put()
}
-func (c TransportSplitter) Patch() (PatchObjectstream, error) {
+func (c TransportSplitter) Patch() (PatchObjectStream, error) {
return c.next.Patch()
}
From 5ed317e24c8ddc505051a3d012eff73828238a4d Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Tue, 13 Aug 2024 15:01:01 +0300
Subject: [PATCH 036/705] [#1307] cli: Introduce `object patch` command
Signed-off-by: Airat Arifullin
---
cmd/frostfs-cli/internal/client/client.go | 65 ++++++++++
cmd/frostfs-cli/modules/object/patch.go | 151 ++++++++++++++++++++++
cmd/frostfs-cli/modules/object/root.go | 2 +
cmd/frostfs-cli/modules/object/util.go | 2 +
4 files changed, 220 insertions(+)
create mode 100644 cmd/frostfs-cli/modules/object/patch.go
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
index 215490dbe..a0fa22410 100644
--- a/cmd/frostfs-cli/internal/client/client.go
+++ b/cmd/frostfs-cli/internal/client/client.go
@@ -2,10 +2,13 @@ package internal
import (
"bytes"
+ "cmp"
"context"
"errors"
"fmt"
"io"
+ "os"
+ "slices"
"sort"
"strings"
@@ -869,3 +872,65 @@ func SyncContainerSettings(ctx context.Context, prm SyncContainerPrm) (*SyncCont
return new(SyncContainerRes), nil
}
+
+// PatchObjectPrm groups parameters of PatchObject operation.
+type PatchObjectPrm struct {
+ commonObjectPrm
+ objectAddressPrm
+
+ NewAttributes []objectSDK.Attribute
+
+ ReplaceAttribute bool
+
+ PayloadPatches []PayloadPatch
+}
+
+type PayloadPatch struct {
+ Range objectSDK.Range
+
+ PayloadPath string
+}
+
+type PatchRes struct {
+ OID oid.ID
+}
+
+func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
+	patchPrm := client.PrmObjectPatch{
+		XHeaders:    prm.xHeaders,
+		BearerToken: prm.bearerToken,
+		Session:     prm.sessionToken,
+		Address:     prm.objAddr,
+	}
+
+	// payload patches must be applied in ascending offset order
+	slices.SortFunc(prm.PayloadPatches, func(a, b PayloadPatch) int {
+		return cmp.Compare(a.Range.GetOffset(), b.Range.GetOffset())
+	})
+
+	patcher, err := prm.cli.ObjectPatchInit(ctx, patchPrm)
+	if err != nil {
+		return nil, fmt.Errorf("init patching: %w", err)
+	}
+
+	if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) {
+		for _, pp := range prm.PayloadPatches {
+			payloadFile, err := os.Open(pp.PayloadPath)
+			if err != nil {
+				return nil, err
+			}
+			applied := patcher.PatchPayload(ctx, &pp.Range, payloadFile)
+			_ = payloadFile.Close()
+			if !applied {
+				break
+			}
+		}
+	}
+
+	res, err := patcher.Close(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &PatchRes{
+		OID: res.ObjectID(),
+	}, nil
+}
diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go
new file mode 100644
index 000000000..8f03885ab
--- /dev/null
+++ b/cmd/frostfs-cli/modules/object/patch.go
@@ -0,0 +1,151 @@
+package object
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/spf13/cobra"
+)
+
+const (
+ newAttrsFlagName = "new-attrs"
+ replaceAttrsFlagName = "replace-attrs"
+ rangeFlagName = "range"
+ payloadFlagName = "payload"
+)
+
+var objectPatchCmd = &cobra.Command{
+	Use:   "patch",
+	Run:   patch,
+	Short: "Patch FrostFS object",
+	Long:  "Patch FrostFS object. Each range passed to the command requires to pass a corresponding patch payload.",
+	Example: `
+frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <cid> --oid <oid> --new-attrs 'key1=val1,key2=val2' --replace-attrs
+frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <cid> --oid <oid> --range offX:lnX --payload /path/to/payloadX --range offY:lnY --payload /path/to/payloadY
+frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <cid> --oid <oid> --new-attrs 'key1=val1,key2=val2' --replace-attrs --range offX:lnX --payload /path/to/payload
+`,
+}
+
+func initObjectPatchCmd() {
+	commonflags.Init(objectPatchCmd)
+	initFlagSession(objectPatchCmd, "PATCH")
+
+	flags := objectPatchCmd.Flags()
+
+	flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
+	_ = objectPatchCmd.MarkFlagRequired(commonflags.CIDFlag)
+
+	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
+	_ = objectPatchCmd.MarkFlagRequired(commonflags.OIDFlag)
+
+	flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
+	flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
+	flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
+	flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
+}
+
+func patch(cmd *cobra.Command, _ []string) {
+ var cnr cid.ID
+ var obj oid.ID
+
+ objAddr := readObjectAddress(cmd, &cnr, &obj)
+
+ ranges, err := getRangeSlice(cmd)
+ commonCmd.ExitOnErr(cmd, "", err)
+
+ payloads := patchPayloadPaths(cmd)
+
+ if len(ranges) != len(payloads) {
+ commonCmd.ExitOnErr(cmd, "", fmt.Errorf("the number of ranges and payloads are not equal: ranges = %d, payloads = %d", len(ranges), len(payloads)))
+ }
+
+ newAttrs, err := parseNewObjectAttrs(cmd)
+ commonCmd.ExitOnErr(cmd, "can't parse new object attributes: %w", err)
+ replaceAttrs, _ := cmd.Flags().GetBool(replaceAttrsFlagName)
+
+ pk := key.GetOrGenerate(cmd)
+
+ cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
+
+ var prm internalclient.PatchObjectPrm
+ prm.SetClient(cli)
+ Prepare(cmd, &prm)
+ ReadOrOpenSession(cmd, &prm, pk, cnr, nil)
+
+ prm.SetAddress(objAddr)
+ prm.NewAttributes = newAttrs
+ prm.ReplaceAttribute = replaceAttrs
+
+ for i := range ranges {
+ prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
+ Range: ranges[i],
+ PayloadPath: payloads[i],
+ })
+ }
+
+ res, err := internalclient.Patch(cmd.Context(), prm)
+ if err != nil {
+ commonCmd.ExitOnErr(cmd, "can't patch the object: %w", err)
+ }
+ cmd.Println("Patched object ID: ", res.OID.EncodeToString())
+}
+
+func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
+ var rawAttrs []string
+
+ raw := cmd.Flag(newAttrsFlagName).Value.String()
+ if len(raw) != 0 {
+ rawAttrs = strings.Split(raw, ",")
+ }
+
+ attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
+ for i := range rawAttrs {
+ k, v, found := strings.Cut(rawAttrs[i], "=")
+ if !found {
+ return nil, fmt.Errorf("invalid attribute format: %s", rawAttrs[i])
+ }
+ attrs[i].SetKey(k)
+ attrs[i].SetValue(v)
+ }
+ return attrs, nil
+}
+
+func getRangeSlice(cmd *cobra.Command) ([]objectSDK.Range, error) {
+ v, _ := cmd.Flags().GetStringSlice(rangeFlagName)
+ if len(v) == 0 {
+ return []objectSDK.Range{}, nil
+ }
+ rs := make([]objectSDK.Range, len(v))
+ for i := range v {
+ before, after, found := strings.Cut(v[i], rangeSep)
+ if !found {
+ return nil, fmt.Errorf("invalid range specifier: %s", v[i])
+ }
+
+ offset, err := strconv.ParseUint(before, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid '%s' range offset specifier: %w", v[i], err)
+ }
+ length, err := strconv.ParseUint(after, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid '%s' range length specifier: %w", v[i], err)
+ }
+
+ rs[i].SetOffset(offset)
+ rs[i].SetLength(length)
+ }
+ return rs, nil
+}
+
+func patchPayloadPaths(cmd *cobra.Command) []string {
+ v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
+ return v
+}
diff --git a/cmd/frostfs-cli/modules/object/root.go b/cmd/frostfs-cli/modules/object/root.go
index 7d8008b10..b808a509e 100644
--- a/cmd/frostfs-cli/modules/object/root.go
+++ b/cmd/frostfs-cli/modules/object/root.go
@@ -29,6 +29,7 @@ func init() {
objectRangeCmd,
objectLockCmd,
objectNodesCmd,
+ objectPatchCmd,
}
Cmd.AddCommand(objectChildCommands...)
@@ -39,6 +40,7 @@ func init() {
}
initObjectPutCmd()
+ initObjectPatchCmd()
initObjectDeleteCmd()
initObjectGetCmd()
initObjectSearchCmd()
diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go
index 381c790e9..96b80fe1b 100644
--- a/cmd/frostfs-cli/modules/object/util.go
+++ b/cmd/frostfs-cli/modules/object/util.go
@@ -306,6 +306,8 @@ func finalizeSession(cmd *cobra.Command, dst SessionPrm, tok *session.Object, ke
case *internal.PutObjectPrm:
common.PrintVerbose(cmd, "Binding session to object PUT...")
tok.ForVerb(session.VerbObjectPut)
+ case *internal.PatchObjectPrm:
+ tok.ForVerb(session.VerbObjectPatch)
case *internal.DeleteObjectPrm:
common.PrintVerbose(cmd, "Binding session to object DELETE...")
tok.ForVerb(session.VerbObjectDelete)
From eeab417dcf75846b69c0567b118dab5fb27a28a2 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Wed, 14 Aug 2024 14:38:01 +0300
Subject: [PATCH 037/705] [#1307] object: Add APE check for `Patch` handler
Signed-off-by: Airat Arifullin
---
pkg/services/object/acl/v2/service.go | 125 +++++++++++++++++++++++-
pkg/services/object/acl/v2/util.go | 2 +
pkg/services/object/ape/checker_test.go | 19 +---
pkg/services/object/ape/request.go | 3 +-
pkg/services/object/ape/service.go | 54 +++++++++-
5 files changed, 183 insertions(+), 20 deletions(-)
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
index a9ddad7ca..5a8e8b065 100644
--- a/pkg/services/object/acl/v2/service.go
+++ b/pkg/services/object/acl/v2/service.go
@@ -35,6 +35,12 @@ type putStreamBasicChecker struct {
next object.PutObjectStream
}
+type patchStreamBasicChecker struct {
+ source *Service
+ next object.PatchObjectStream
+ nonFirstSend bool
+}
+
type getStreamBasicChecker struct {
checker ACLChecker
@@ -250,7 +256,12 @@ func (b Service) Put() (object.PutObjectStream, error) {
}
func (b Service) Patch() (object.PatchObjectStream, error) {
- return b.next.Patch()
+ streamer, err := b.next.Patch()
+
+ return &patchStreamBasicChecker{
+ source: &b,
+ next: streamer,
+ }, err
}
func (b Service) Head(
@@ -738,6 +749,65 @@ func (g *searchStreamBasicChecker) Send(resp *objectV2.SearchResponse) error {
return g.SearchStream.Send(resp)
}
+func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
+ body := request.GetBody()
+ if body == nil {
+ return errEmptyBody
+ }
+
+ if !p.nonFirstSend {
+ p.nonFirstSend = true
+
+ cnr, err := getContainerIDFromRequest(request)
+ if err != nil {
+ return err
+ }
+
+ objV2 := request.GetBody().GetAddress().GetObjectID()
+ if objV2 == nil {
+ return errors.New("missing oid")
+ }
+ obj := new(oid.ID)
+ err = obj.ReadFromV2(*objV2)
+ if err != nil {
+ return err
+ }
+
+ var sTok *sessionSDK.Object
+ sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
+ if err != nil {
+ return err
+ }
+
+ bTok, err := originalBearerToken(request.GetMetaHeader())
+ if err != nil {
+ return err
+ }
+
+ req := MetaWithToken{
+ vheader: request.GetVerificationHeader(),
+ token: sTok,
+ bearer: bTok,
+ src: request,
+ }
+
+ reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(req, cnr)
+ if err != nil {
+ return err
+ }
+
+ reqInfo.obj = obj
+
+ ctx = requestContext(ctx, reqInfo)
+ }
+
+ return p.next.Send(ctx, request)
+}
+
+func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+ return p.next.CloseAndRecv(ctx)
+}
+
func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
cnr, err := b.containers.Get(idCnr) // fetch actual container
if err != nil {
@@ -794,3 +864,56 @@ func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (in
return info, nil
}
+
+// findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert.
+func (b Service) findRequestInfoWithoutACLOperationAssert(req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) {
+	cnr, err := b.containers.Get(idCnr) // fetch actual container
+	if err != nil {
+		return info, err
+	}
+
+	if req.token != nil {
+		currentEpoch, err := b.nm.Epoch()
+		if err != nil {
+			return info, errors.New("can't fetch current epoch")
+		}
+		if req.token.ExpiredAt(currentEpoch) {
+			return info, new(apistatus.SessionTokenExpired)
+		}
+		if req.token.InvalidAt(currentEpoch) {
+			return info, fmt.Errorf("%s: token is invalid at %d epoch",
+				invalidRequestMessage, currentEpoch)
+		}
+	}
+
+	// find request role and key
+	ownerID, ownerKey, err := req.RequestOwner()
+	if err != nil {
+		return info, err
+	}
+	res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
+	if err != nil {
+		return info, err
+	}
+
+	info.basicACL = cnr.Value.BasicACL()
+	info.requestRole = res.Role
+	info.cnrOwner = cnr.Value.Owner()
+	info.idCnr = idCnr
+
+	cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
+	if hasNamespace {
+		info.cnrNamespace = cnrNamespace
+	}
+
+	// it is assumed that at the moment the key will be valid,
+	// otherwise the request would not pass validation
+	info.senderKey = res.Key
+
+	// add bearer token if it is present in request
+	info.bearer = req.bearer
+
+	info.srcRequest = req.src
+
+	return info, nil
+}
diff --git a/pkg/services/object/acl/v2/util.go b/pkg/services/object/acl/v2/util.go
index 76fd9651d..c5225e8c4 100644
--- a/pkg/services/object/acl/v2/util.go
+++ b/pkg/services/object/acl/v2/util.go
@@ -46,6 +46,8 @@ func getContainerIDFromRequest(req any) (cid.ID, error) {
idV2 = v.GetBody().GetAddress().GetContainerID()
case *objectV2.PutSingleRequest:
idV2 = v.GetBody().GetObject().GetHeader().GetContainerID()
+ case *objectV2.PatchRequest:
+ idV2 = v.GetBody().GetAddress().GetContainerID()
default:
return cid.ID{}, errors.New("unknown request type")
}
diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go
index afe19fc51..090f6a83c 100644
--- a/pkg/services/object/ape/checker_test.go
+++ b/pkg/services/object/ape/checker_test.go
@@ -518,22 +518,7 @@ func TestAPECheck_BearerTokenOverrides(t *testing.T) {
ls := inmemory.NewInmemoryLocalStorage()
ms := inmemory.NewInmemoryMorphRuleChainStorage()
- node1Key, err := keys.NewPrivateKey()
- require.NoError(t, err)
- node1 := netmapSDK.NodeInfo{}
- node1.SetPublicKey(node1Key.PublicKey().Bytes())
- netmap := &netmapSDK.NetMap{}
- netmap.SetEpoch(100)
- netmap.SetNodes([]netmapSDK.NodeInfo{node1})
-
- nm := &netmapStub{
- currentEpoch: 100,
- netmaps: map[uint64]*netmapSDK.NetMap{
- 100: netmap,
- },
- }
-
- checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, nil, nil)
+ checker := NewChecker(ls, ms, headerProvider, frostfsidProvider, nil, &stMock{}, nil, nil)
prm := Prm{
Method: method,
@@ -556,7 +541,7 @@ func TestAPECheck_BearerTokenOverrides(t *testing.T) {
}
}
- err = checker.CheckAPE(context.Background(), prm)
+ err := checker.CheckAPE(context.Background(), prm)
if test.expectAPEErr {
require.Error(t, err)
} else {
diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go
index 1c129f65f..da5307ca7 100644
--- a/pkg/services/object/ape/request.go
+++ b/pkg/services/object/ape/request.go
@@ -103,7 +103,8 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re
nativeschema.MethodHeadObject,
nativeschema.MethodRangeObject,
nativeschema.MethodHashObject,
- nativeschema.MethodDeleteObject:
+ nativeschema.MethodDeleteObject,
+ nativeschema.MethodPatchObject:
if prm.Object == nil {
return defaultRequest, fmt.Errorf("method %s: %w", prm.Method, errMissingOID)
}
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
index 64dd19c24..a1634e7c5 100644
--- a/pkg/services/object/ape/service.go
+++ b/pkg/services/object/ape/service.go
@@ -204,8 +204,60 @@ func (c *Service) Put() (objectSvc.PutObjectStream, error) {
}, err
}
+type patchStreamBasicChecker struct {
+ apeChecker Checker
+
+ next objectSvc.PatchObjectStream
+
+ nonFirstSend bool
+}
+
+func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
+ if !p.nonFirstSend {
+ p.nonFirstSend = true
+
+ reqCtx, err := requestContext(ctx)
+ if err != nil {
+ return toStatusErr(err)
+ }
+
+ cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
+ if err != nil {
+ return toStatusErr(err)
+ }
+
+ prm := Prm{
+ Namespace: reqCtx.Namespace,
+ Container: cnrID,
+ Object: objID,
+ Method: nativeschema.MethodPatchObject,
+ SenderKey: hex.EncodeToString(reqCtx.SenderKey),
+ ContainerOwner: reqCtx.ContainerOwner,
+ Role: nativeSchemaRole(reqCtx.Role),
+ SoftAPECheck: reqCtx.SoftAPECheck,
+ BearerToken: reqCtx.BearerToken,
+ XHeaders: request.GetMetaHeader().GetXHeaders(),
+ }
+
+ if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
+ return toStatusErr(err)
+ }
+ }
+
+ return p.next.Send(ctx, request)
+}
+
+func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
+ return p.next.CloseAndRecv(ctx)
+}
+
func (c *Service) Patch() (objectSvc.PatchObjectStream, error) {
- return c.next.Patch()
+ streamer, err := c.next.Patch()
+
+ return &patchStreamBasicChecker{
+ apeChecker: c.apeChecker,
+ next: streamer,
+ }, err
}
func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
From 41104f2383226b9e92c13b1907403c3f9176bbae Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Wed, 14 Aug 2024 14:39:13 +0300
Subject: [PATCH 038/705] [#1307] cli: Make cli process object.patch
Signed-off-by: Airat Arifullin
---
cmd/frostfs-cli/modules/util/ape.go | 3 +++
1 file changed, 3 insertions(+)
diff --git a/cmd/frostfs-cli/modules/util/ape.go b/cmd/frostfs-cli/modules/util/ape.go
index 532dc0a50..9af57434a 100644
--- a/cmd/frostfs-cli/modules/util/ape.go
+++ b/cmd/frostfs-cli/modules/util/ape.go
@@ -239,6 +239,8 @@ func parseAction(lexeme string) ([]string, bool, error) {
return []string{nativeschema.MethodRangeObject}, true, nil
case "object.hash":
return []string{nativeschema.MethodHashObject}, true, nil
+ case "object.patch":
+ return []string{nativeschema.MethodPatchObject}, true, nil
case "object.*":
return []string{
nativeschema.MethodPutObject,
@@ -248,6 +250,7 @@ func parseAction(lexeme string) ([]string, bool, error) {
nativeschema.MethodSearchObject,
nativeschema.MethodRangeObject,
nativeschema.MethodHashObject,
+ nativeschema.MethodPatchObject,
}, true, nil
case "container.put":
return []string{nativeschema.MethodPutContainer}, false, nil
From b7acb34fa4377a690f5f7fbe18c580c2d785b772 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 19 Aug 2024 15:50:35 +0300
Subject: [PATCH 039/705] [#1319] treeSvc: Do not wrap error from APE
Signed-off-by: Dmitrii Stepanov
---
pkg/services/tree/ape.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
index 6e78bf4ec..ee4687911 100644
--- a/pkg/services/tree/ape.go
+++ b/pkg/services/tree/ape.go
@@ -138,7 +138,7 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
request, err := s.newAPERequest(ctx, namespace, cid, operation, role, publicKey)
if err != nil {
- return apeErr(err)
+ return fmt.Errorf("failed to create ape request: %w", err)
}
var cr engine.ChainRouter
@@ -167,7 +167,7 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
rt := engine.NewRequestTargetExtended(namespace, cid.EncodeToString(), fmt.Sprintf("%s:%s", namespace, publicKey.Address()), groups)
status, found, err := cr.IsAllowed(apechain.Ingress, rt, request)
if err != nil {
- return apeErr(err)
+ return err
}
if found && status == apechain.Allow {
return nil
From 8319b59238925960597650d3227310d8b6f187ec Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Mon, 19 Aug 2024 12:33:15 +0300
Subject: [PATCH 040/705] [#1318] Fix gofumpt issue
Signed-off-by: Anton Nikiforov
---
pkg/services/object/metrics.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index e53b7584f..61aed5003 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -213,6 +213,7 @@ func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse,
return res, err
}
+
func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) error {
s.metrics.AddPayloadSize("Patch", len(req.GetBody().GetPatch().GetChunk()))
From 6ff0b0996b31d0de3bbb12d41edb19bdd0c16dda Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Mon, 19 Aug 2024 12:34:05 +0300
Subject: [PATCH 041/705] [#1318] metrics: Fix `container_size_bytes` for EC
When a node puts a chunk into an EC container, the `policer` may remove it as redundant.
Such a chunk is marked as removed. When the parent object is removed and `gc` starts iterating over its chunks,
the node counts the removal of that chunk twice.
Signed-off-by: Anton Nikiforov
---
pkg/local_object_storage/metabase/inhume.go | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index c265fb217..b62accc43 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -236,7 +236,7 @@ func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes
return err
}
} else if errors.As(err, &ecErr) {
- err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value, targetKey)
+ err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value)
if err != nil {
return err
}
@@ -280,7 +280,7 @@ func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes
func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *InhumeRes,
garbageBKT *bbolt.Bucket, graveyardBKT *bbolt.Bucket,
- ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte, targetKey []byte,
+ ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte,
) error {
for _, chunk := range ecInfo.Chunks {
chunkBuf := make([]byte, addressKeySize)
@@ -296,11 +296,11 @@ func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *I
if err != nil {
return err
}
- err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, chunkObj, res)
+ chunkKey := addressKey(chunkAddr, chunkBuf)
+ err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, chunkKey, cnr, chunkObj, res)
if err != nil {
return err
}
- chunkKey := addressKey(chunkAddr, chunkBuf)
if tomb != nil {
_, err = db.markAsGC(graveyardBKT, garbageBKT, chunkKey)
if err != nil {
From cfda9003a7d4283946e9cbe742702114f93bbdce Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Tue, 20 Aug 2024 16:18:46 +0300
Subject: [PATCH 042/705] [#1318] meta: Add test `TestInhumeECObject`
Signed-off-by: Anton Nikiforov
---
.../metabase/inhume_ec_test.go | 116 ++++++++++++++++++
1 file changed, 116 insertions(+)
create mode 100644 pkg/local_object_storage/metabase/inhume_ec_test.go
diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go
new file mode 100644
index 000000000..c3b1e72da
--- /dev/null
+++ b/pkg/local_object_storage/metabase/inhume_ec_test.go
@@ -0,0 +1,116 @@
+package meta
+
+import (
+ "context"
+ "path/filepath"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestInhumeECObject(t *testing.T) {
+ t.Parallel()
+
+ db := New(
+ WithPath(filepath.Join(t.TempDir(), "metabase")),
+ WithPermissions(0o600),
+ WithEpochState(epochState{uint64(12)}),
+ )
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init())
+ defer func() { require.NoError(t, db.Close()) }()
+
+ cnr := cidtest.ID()
+ ecChunk := oidtest.ID()
+ ecChunk2 := oidtest.ID()
+ ecParent := oidtest.ID()
+ tombstoneID := oidtest.ID()
+
+ chunkObj := testutil.GenerateObjectWithCID(cnr)
+ chunkObj.SetContainerID(cnr)
+ chunkObj.SetID(ecChunk)
+ chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
+ chunkObj.SetPayloadSize(uint64(5))
+ chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0))
+
+ chunkObj2 := testutil.GenerateObjectWithCID(cnr)
+ chunkObj2.SetContainerID(cnr)
+ chunkObj2.SetID(ecChunk2)
+ chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+ chunkObj2.SetPayloadSize(uint64(10))
+ chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 1, 3, []byte{}, 0))
+
+ // put object with EC
+
+ var prm PutPrm
+ prm.SetObject(chunkObj)
+ prm.SetStorageID([]byte("0/0"))
+ _, err := db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ prm.SetObject(chunkObj2)
+ _, err = db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ var ecChunkAddress oid.Address
+ ecChunkAddress.SetContainer(cnr)
+ ecChunkAddress.SetObject(ecChunk)
+
+ var ecParentAddress oid.Address
+ ecParentAddress.SetContainer(cnr)
+ ecParentAddress.SetObject(ecParent)
+
+ var chunkObjectAddress oid.Address
+ chunkObjectAddress.SetContainer(cnr)
+ chunkObjectAddress.SetObject(ecChunk)
+
+ var getPrm GetPrm
+
+ getPrm.SetAddress(ecChunkAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.NoError(t, err)
+
+ var ecInfoError *objectSDK.ECInfoError
+ getPrm.SetAddress(ecParentAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, &ecInfoError)
+ require.True(t, len(ecInfoError.ECInfo().Chunks) == 2 &&
+ ecInfoError.ECInfo().Chunks[0].Index == 0 &&
+ ecInfoError.ECInfo().Chunks[0].Total == 3)
+
+ // inhume Chunk
+ var inhumePrm InhumePrm
+ var tombAddress oid.Address
+ inhumePrm.SetAddresses(chunkObjectAddress)
+ res, err := db.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
+ require.True(t, len(res.deletionDetails) == 1)
+ require.True(t, res.deletionDetails[0].Size == 5)
+
+ // inhume EC parent (like Delete does)
+ tombAddress.SetContainer(cnr)
+ tombAddress.SetObject(tombstoneID)
+ inhumePrm.SetAddresses(ecParentAddress)
+ inhumePrm.SetTombstoneAddress(tombAddress)
+ res, err = db.Inhume(context.Background(), inhumePrm)
+ require.NoError(t, err)
+ // Previously deleted chunk shouldn't be in the details, because it is marked as garbage
+ require.True(t, len(res.deletionDetails) == 1)
+ require.True(t, res.deletionDetails[0].Size == 10)
+
+ getPrm.SetAddress(ecParentAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+
+ getPrm.SetAddress(ecChunkAddress)
+ _, err = db.Get(context.Background(), getPrm)
+ require.ErrorAs(t, err, new(*apistatus.ObjectAlreadyRemoved))
+}
From 8c1082b31a990273020bde006f63d160b51620df Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Tue, 20 Aug 2024 15:35:45 +0300
Subject: [PATCH 043/705] [#1316] go.mod: Bump go version to 1.22
Signed-off-by: Ekaterina Lebedeva
---
.forgejo/workflows/build.yml | 2 +-
.forgejo/workflows/pre-commit.yml | 2 +-
.forgejo/workflows/tests.yml | 12 ++++++------
.forgejo/workflows/vulncheck.yml | 2 +-
Makefile | 8 ++++----
README.md | 2 +-
go.mod | 2 +-
7 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml
index 86943fe88..ce2d64dd9 100644
--- a/.forgejo/workflows/build.yml
+++ b/.forgejo/workflows/build.yml
@@ -8,7 +8,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- go_versions: [ '1.21', '1.22' ]
+ go_versions: [ '1.22', '1.23' ]
steps:
- uses: actions/checkout@v3
diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml
index 117cda93b..8b06a2fdf 100644
--- a/.forgejo/workflows/pre-commit.yml
+++ b/.forgejo/workflows/pre-commit.yml
@@ -16,7 +16,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: 1.22
+ go-version: 1.23
- name: Set up Python
run: |
apt update
diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml
index 5d64d7bc4..a908c6278 100644
--- a/.forgejo/workflows/tests.yml
+++ b/.forgejo/workflows/tests.yml
@@ -11,7 +11,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.22'
+ go-version: '1.23'
cache: true
- name: Install linters
@@ -25,7 +25,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- go_versions: [ '1.21', '1.22' ]
+ go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@@ -48,7 +48,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.21'
+ go-version: '1.22'
cache: true
- name: Run tests
@@ -63,7 +63,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.22'
+ go-version: '1.23'
cache: true
- name: Install staticcheck
@@ -81,7 +81,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.21'
+ go-version: '1.22'
cache: true
- name: Install gopls
@@ -99,7 +99,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: '1.22'
+ go-version: '1.23'
cache: true
- name: Install gofumpt
diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml
index 3af564c4b..2951a8059 100644
--- a/.forgejo/workflows/vulncheck.yml
+++ b/.forgejo/workflows/vulncheck.yml
@@ -13,7 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
- go-version: '1.22'
+ go-version: '1.23'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/Makefile b/Makefile
index 11111d9a7..8d1238b6f 100755
--- a/Makefile
+++ b/Makefile
@@ -8,8 +8,8 @@ HUB_IMAGE ?= truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.56.1
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.5
+LINT_VERSION ?= 1.60.1
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
PROTOC_VERSION ?= 25.0
PROTOC_GEN_GO_VERSION ?= $(shell go list -f '{{.Version}}' -m google.golang.org/protobuf)
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
@@ -17,7 +17,7 @@ PROTOC_OS_VERSION=osx-x86_64
ifeq ($(shell uname), Linux)
PROTOC_OS_VERSION=linux-x86_64
endif
-STATICCHECK_VERSION ?= 2023.1.6
+STATICCHECK_VERSION ?= 2024.1.1
ARCH = amd64
BIN = bin
@@ -46,7 +46,7 @@ STATICCHECK_VERSION_DIR ?= $(STATICCHECK_DIR)/$(STATICCHECK_VERSION)
SOURCES = $(shell find . -type f -name "*.go" -print)
-GOFUMPT_VERSION ?= v0.6.0
+GOFUMPT_VERSION ?= v0.7.0
GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
diff --git a/README.md b/README.md
index 413010372..8225f56c5 100644
--- a/README.md
+++ b/README.md
@@ -49,7 +49,7 @@ The latest version of frostfs-node works with frostfs-contract
# Building
-To make all binaries you need Go 1.21+ and `make`:
+To make all binaries you need Go 1.22+ and `make`:
```
make all
```
diff --git a/go.mod b/go.mod
index 93ed7d750..79dcc9553 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module git.frostfs.info/TrueCloudLab/frostfs-node
-go 1.21
+go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
From a345c972bffd63c5df513a4e6770b5bc12811e3b Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Tue, 20 Aug 2024 18:37:04 +0300
Subject: [PATCH 044/705] [#1316] lint: Fix warnings
Renamed parameters `min/max` to avoid conflicts with
predeclared identifiers.
Replaced background context with parent context without
cancellation in closer functions in frostfs-node.
Signed-off-by: Ekaterina Lebedeva
---
.golangci.yml | 3 ++-
cmd/frostfs-node/config.go | 2 +-
cmd/frostfs-node/tracing.go | 2 +-
.../blobstor/internal/blobstortest/common.go | 16 ++++++++--------
.../blobstor/internal/blobstortest/control.go | 6 +++---
.../blobstor/internal/blobstortest/delete.go | 4 ++--
.../blobstor/internal/blobstortest/exists.go | 4 ++--
.../blobstor/internal/blobstortest/get.go | 4 ++--
.../blobstor/internal/blobstortest/get_range.go | 4 ++--
.../blobstor/internal/blobstortest/iterate.go | 4 ++--
pkg/morph/client/notary.go | 4 ++--
11 files changed, 27 insertions(+), 26 deletions(-)
diff --git a/.golangci.yml b/.golangci.yml
index d209693aa..2e9e78fc3 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -12,7 +12,8 @@ run:
# output configuration options
output:
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
- format: tab
+ formats:
+ - format: tab
# all available settings of specific linters
linters-settings:
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 1af27d733..110281418 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1073,7 +1073,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
c.onShutdown(func() {
c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
- err := ls.Close(context.Background())
+ err := ls.Close(context.WithoutCancel(ctx))
if err != nil {
c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
zap.String("error", err.Error()),
diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go
index 312adfb8d..675c31374 100644
--- a/cmd/frostfs-node/tracing.go
+++ b/cmd/frostfs-node/tracing.go
@@ -21,7 +21,7 @@ func initTracing(ctx context.Context, c *cfg) {
c.closers = append(c.closers, closer{
name: "tracing",
fn: func() {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), time.Second*5)
defer cancel()
err := tracing.Shutdown(ctx) // cfg context cancels before close
if err != nil {
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
index c08e39bf1..5d14a9a3a 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
@@ -27,21 +27,21 @@ type objectDesc struct {
storageID []byte
}
-func TestAll(t *testing.T, cons Constructor, min, max uint64) {
+func TestAll(t *testing.T, cons Constructor, minSize, maxSize uint64) {
t.Run("get", func(t *testing.T) {
- TestGet(t, cons, min, max)
+ TestGet(t, cons, minSize, maxSize)
})
t.Run("get range", func(t *testing.T) {
- TestGetRange(t, cons, min, max)
+ TestGetRange(t, cons, minSize, maxSize)
})
t.Run("delete", func(t *testing.T) {
- TestDelete(t, cons, min, max)
+ TestDelete(t, cons, minSize, maxSize)
})
t.Run("exists", func(t *testing.T) {
- TestExists(t, cons, min, max)
+ TestExists(t, cons, minSize, maxSize)
})
t.Run("iterate", func(t *testing.T) {
- TestIterate(t, cons, min, max)
+ TestIterate(t, cons, minSize, maxSize)
})
}
@@ -51,12 +51,12 @@ func TestInfo(t *testing.T, cons Constructor, expectedType string, expectedPath
require.Equal(t, expectedPath, s.Path())
}
-func prepare(t *testing.T, count int, s common.Storage, min, max uint64) []objectDesc {
+func prepare(t *testing.T, count int, s common.Storage, minSize, maxSize uint64) []objectDesc {
objects := make([]objectDesc, count)
r := mrand.New(mrand.NewSource(0))
for i := range objects {
- objects[i].obj = NewObject(min + uint64(r.Intn(int(max-min+1)))) // not too large
+ objects[i].obj = NewObject(minSize + uint64(r.Intn(int(maxSize-minSize+1)))) // not too large
objects[i].addr = objectCore.AddressOf(objects[i].obj)
raw, err := objects[i].obj.Marshal()
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
index a3bbc021d..21c80b089 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
@@ -13,12 +13,12 @@ import (
// TestControl checks correctness of a read-only mode.
// cons must return a storage which is NOT opened.
-func TestControl(t *testing.T, cons Constructor, min, max uint64) {
+func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
- objects := prepare(t, 10, s, min, max)
+ objects := prepare(t, 10, s, minSize, maxSize)
require.NoError(t, s.Close())
require.NoError(t, s.Open(mode.ComponentReadOnly))
@@ -34,7 +34,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) {
t.Run("put fails", func(t *testing.T) {
var prm common.PutPrm
- prm.Object = NewObject(min + uint64(rand.Intn(int(max-min+1))))
+ prm.Object = NewObject(minSize + uint64(rand.Intn(int(maxSize-minSize+1))))
prm.Address = objectCore.AddressOf(prm.Object)
_, err := s.Put(context.Background(), prm)
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
index 750619a30..cf4e76513 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
@@ -11,13 +11,13 @@ import (
"github.com/stretchr/testify/require"
)
-func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
+func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
defer func() { require.NoError(t, s.Close()) }()
- objects := prepare(t, 4, s, min, max)
+ objects := prepare(t, 4, s, minSize, maxSize)
t.Run("delete non-existent", func(t *testing.T) {
var prm common.DeletePrm
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
index 33b50b12f..08465ed5e 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
@@ -10,13 +10,13 @@ import (
"github.com/stretchr/testify/require"
)
-func TestExists(t *testing.T, cons Constructor, min, max uint64) {
+func TestExists(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
defer func() { require.NoError(t, s.Close()) }()
- objects := prepare(t, 1, s, min, max)
+ objects := prepare(t, 1, s, minSize, maxSize)
t.Run("missing object", func(t *testing.T) {
prm := common.ExistsPrm{Address: oidtest.Address()}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
index 12f73c3e9..d1f709b0c 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
@@ -11,13 +11,13 @@ import (
"github.com/stretchr/testify/require"
)
-func TestGet(t *testing.T, cons Constructor, min, max uint64) {
+func TestGet(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
defer func() { require.NoError(t, s.Close()) }()
- objects := prepare(t, 2, s, min, max)
+ objects := prepare(t, 2, s, minSize, maxSize)
t.Run("missing object", func(t *testing.T) {
gPrm := common.GetPrm{Address: oidtest.Address()}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
index 93de683c2..fcbeddac7 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
@@ -13,13 +13,13 @@ import (
"github.com/stretchr/testify/require"
)
-func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
+func TestGetRange(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
defer func() { require.NoError(t, s.Close()) }()
- objects := prepare(t, 1, s, min, max)
+ objects := prepare(t, 1, s, minSize, maxSize)
t.Run("missing object", func(t *testing.T) {
gPrm := common.GetRangePrm{Address: oidtest.Address()}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
index e66fe87b6..3a6c8b699 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
@@ -10,13 +10,13 @@ import (
"github.com/stretchr/testify/require"
)
-func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
+func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
s := cons(t)
require.NoError(t, s.Open(mode.ComponentReadWrite))
require.NoError(t, s.Init())
defer func() { require.NoError(t, s.Close()) }()
- objects := prepare(t, 10, s, min, max)
+ objects := prepare(t, 10, s, minSize, maxSize)
// Delete random object to ensure it is not iterated over.
const delID = 2
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 4865b43ef..616b3b5c3 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -641,8 +641,8 @@ func (c *Client) notaryTxValidationLimit() (uint32, error) {
return 0, fmt.Errorf("can't get current blockchain height: %w", err)
}
- min := bc + c.notary.txValidTime
- rounded := (min/c.notary.roundTime + 1) * c.notary.roundTime
+ minTime := bc + c.notary.txValidTime
+ rounded := (minTime/c.notary.roundTime + 1) * c.notary.roundTime
return rounded, nil
}
From 7bca428db03716e37e1fec315ef974886a2e9fea Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Mon, 19 Aug 2024 18:28:53 +0300
Subject: [PATCH 045/705] [#1322] Use new protobuf marshaler
Signed-off-by: Evgenii Stratonikov
---
Makefile | 8 +-
.../modules/control/set_netmap_status.go | 2 +-
.../modules/control/shards_set_mode.go | 4 +-
cmd/frostfs-cli/modules/control/util.go | 4 +-
cmd/frostfs-node/netmap.go | 2 +-
go.mod | 7 +-
go.sum | 10 +-
pkg/local_object_storage/metabase/get.go | 7 +-
pkg/services/control/common_test.go | 33 -
pkg/services/control/ir/service.go | 46 -
pkg/services/control/ir/service.pb.go | 1277 --
pkg/services/control/ir/service_frostfs.pb.go | 2584 ++-
pkg/services/control/ir/service_test.go | 44 -
pkg/services/control/ir/types.go | 15 -
pkg/services/control/ir/types.pb.go | 230 -
pkg/services/control/ir/types_frostfs.pb.go | 218 +-
pkg/services/control/server/list_shards.go | 4 +-
pkg/services/control/service.go | 142 -
pkg/services/control/service.pb.go | 7094 --------
pkg/services/control/service_frostfs.pb.go | 14829 ++++++++++++++--
pkg/services/control/service_test.go | 181 -
pkg/services/control/types.go | 118 -
pkg/services/control/types.pb.go | 1011 --
pkg/services/control/types_frostfs.pb.go | 1721 +-
pkg/services/control/types_test.go | 151 -
pkg/services/tree/service.pb.go | 3587 ----
pkg/services/tree/service_frostfs.pb.go | 7928 ++++++++-
pkg/services/tree/types.pb.go | 320 -
pkg/services/tree/types_frostfs.pb.go | 537 +-
29 files changed, 24581 insertions(+), 17533 deletions(-)
delete mode 100644 pkg/services/control/common_test.go
delete mode 100644 pkg/services/control/ir/service.go
delete mode 100644 pkg/services/control/ir/service.pb.go
delete mode 100644 pkg/services/control/ir/service_test.go
delete mode 100644 pkg/services/control/ir/types.go
delete mode 100644 pkg/services/control/ir/types.pb.go
delete mode 100644 pkg/services/control/service.go
delete mode 100644 pkg/services/control/service.pb.go
delete mode 100644 pkg/services/control/service_test.go
delete mode 100644 pkg/services/control/types.go
delete mode 100644 pkg/services/control/types.pb.go
delete mode 100644 pkg/services/control/types_test.go
delete mode 100644 pkg/services/tree/service.pb.go
delete mode 100644 pkg/services/tree/types.pb.go
diff --git a/Makefile b/Makefile
index 8d1238b6f..94a8a14c3 100755
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,6 @@ GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.1
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
PROTOC_VERSION ?= 25.0
-PROTOC_GEN_GO_VERSION ?= $(shell go list -f '{{.Version}}' -m google.golang.org/protobuf)
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
PROTOC_OS_VERSION=osx-x86_64
ifeq ($(shell uname), Linux)
@@ -39,7 +38,6 @@ LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT
TMP_DIR := .cache
PROTOBUF_DIR ?= $(abspath $(BIN))/protobuf
PROTOC_DIR ?= $(PROTOBUF_DIR)/protoc-v$(PROTOC_VERSION)
-PROTOC_GEN_GO_DIR ?= $(PROTOBUF_DIR)/protoc-gen-go-$(PROTOC_GEN_GO_VERSION)
PROTOGEN_FROSTFS_DIR ?= $(PROTOBUF_DIR)/protogen-$(PROTOGEN_FROSTFS_VERSION)
STATICCHECK_DIR ?= $(abspath $(BIN))/staticcheck
STATICCHECK_VERSION_DIR ?= $(STATICCHECK_DIR)/$(STATICCHECK_VERSION)
@@ -107,17 +105,15 @@ export-metrics: dep
# Regenerate proto files:
protoc:
- @if [ ! -d "$(PROTOC_DIR)" ] || [ ! -d "$(PROTOC_GEN_GO_DIR)" ] || [ ! -d "$(PROTOGEN_FROSTFS_DIR)" ]; then \
+ @if [ ! -d "$(PROTOC_DIR)" ] || [ ! -d "$(PROTOGEN_FROSTFS_DIR)" ]; then \
make protoc-install; \
fi
@for f in `find . -type f -name '*.proto' -not -path './bin/*'`; do \
echo "⇒ Processing $$f "; \
$(PROTOC_DIR)/bin/protoc \
--proto_path=.:$(PROTOC_DIR)/include:/usr/local/include \
- --plugin=protoc-gen-go=$(PROTOC_GEN_GO_DIR)/protoc-gen-go \
--plugin=protoc-gen-go-frostfs=$(PROTOGEN_FROSTFS_DIR)/protogen \
--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
- --go_out=. --go_opt=paths=source_relative \
--go-grpc_opt=require_unimplemented_servers=false \
--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
done
@@ -130,8 +126,6 @@ protoc-install:
@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
@rm $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip
- @echo "⇒ Installing protoc-gen-go..."
- @GOBIN=$(PROTOC_GEN_GO_DIR) go install -v google.golang.org/protobuf/...@$(PROTOC_GEN_GO_VERSION)
@echo "⇒ Instaling protogen FrostFS plugin..."
@GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen@$(PROTOGEN_FROSTFS_VERSION)
diff --git a/cmd/frostfs-cli/modules/control/set_netmap_status.go b/cmd/frostfs-cli/modules/control/set_netmap_status.go
index 31ade1eb9..a107b2b53 100644
--- a/cmd/frostfs-cli/modules/control/set_netmap_status.go
+++ b/cmd/frostfs-cli/modules/control/set_netmap_status.go
@@ -84,7 +84,7 @@ func setNetmapStatus(cmd *cobra.Command, _ []string) {
body.SetStatus(control.NetmapStatus_MAINTENANCE)
if force {
- body.SetForceMaintenance()
+ body.SetForceMaintenance(true)
common.PrintVerbose(cmd, "Local maintenance will be forced.")
}
targetStatus = control.NetmapStatus_MAINTENANCE
diff --git a/cmd/frostfs-cli/modules/control/shards_set_mode.go b/cmd/frostfs-cli/modules/control/shards_set_mode.go
index e73f15178..dd0d77748 100644
--- a/cmd/frostfs-cli/modules/control/shards_set_mode.go
+++ b/cmd/frostfs-cli/modules/control/shards_set_mode.go
@@ -117,10 +117,10 @@ func setShardMode(cmd *cobra.Command, _ []string) {
req.SetBody(body)
body.SetMode(mode)
- body.SetShardIDList(getShardIDList(cmd))
+ body.SetShard_ID(getShardIDList(cmd))
reset, _ := cmd.Flags().GetBool(shardClearErrorsFlag)
- body.ClearErrorCounter(reset)
+ body.SetResetErrorCounter(reset)
signRequest(cmd, pk, req)
diff --git a/cmd/frostfs-cli/modules/control/util.go b/cmd/frostfs-cli/modules/control/util.go
index c0577ac0c..ef547681f 100644
--- a/cmd/frostfs-cli/modules/control/util.go
+++ b/cmd/frostfs-cli/modules/control/util.go
@@ -44,7 +44,7 @@ func verifyResponse(cmd *cobra.Command,
GetSign() []byte
},
body interface {
- StableMarshal([]byte) []byte
+ MarshalProtobuf([]byte) []byte
},
) {
if sigControl == nil {
@@ -60,7 +60,7 @@ func verifyResponse(cmd *cobra.Command,
var sig frostfscrypto.Signature
commonCmd.ExitOnErr(cmd, "can't read signature: %w", sig.ReadFromV2(sigV2))
- if !sig.Verify(body.StableMarshal(nil)) {
+ if !sig.Verify(body.MarshalProtobuf(nil)) {
commonCmd.ExitOnErr(cmd, "", errors.New("invalid response signature"))
}
}
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index 128cc3005..8104b1dc1 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -84,7 +84,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
}
}
- s.setControlNetmapStatus(ctrlNetSt)
+ s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
}
// sets the current node state to the given value. Subsequent cfg.bootstrap
diff --git a/go.mod b/go.mod
index 79dcc9553..b665709cc 100644
--- a/go.mod
+++ b/go.mod
@@ -4,16 +4,17 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240813155151-d112a28d382f
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240813155821-98aabc45a720
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240820072028-6dd7be11d13b
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
+ github.com/VictoriaMetrics/easyproto v0.1.4
github.com/cheggaaa/pb v1.0.29
github.com/chzyer/readline v1.5.1
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
@@ -21,6 +22,7 @@ require (
github.com/google/uuid v1.6.0
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/klauspost/compress v1.17.4
+ github.com/mailru/easyjson v0.7.7
github.com/mitchellh/go-homedir v1.1.0
github.com/mr-tron/base58 v1.2.0
github.com/multiformats/go-multiaddr v0.12.1
@@ -84,7 +86,6 @@ require (
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
github.com/klauspost/reedsolomon v1.12.1 // indirect
github.com/magiconair/properties v1.8.7 // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
diff --git a/go.sum b/go.sum
index 803a065c3..54e0d0301 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240813155151-d112a28d382f h1:xrJqsXOZeSkBFMSyN+PQ9DiCGxVULU3VIN/tuH/vtb8=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240813155151-d112a28d382f/go.mod h1:mc7j6Cc1GU1tJZNmDwEYiJJ339biNnU1Bz3wZGogMe0=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326 h1:TkH+NSsY4C/Z8MocIJyMcqLm5vEhZcSowOldJyilKKA=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326/go.mod h1:zZnHiRv9m5+ESYLhBXY9Jds9A/YIDEUGiuyPUS09HwM=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240813155821-98aabc45a720 h1:15UXpW42bfshIv/X5kww92jG2o0drHgsdFd+UJ6zD7g=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240813155821-98aabc45a720/go.mod h1:XRX/bBQsDJKr040N/a0YnDhxJqaUv1XyMVj3qxnb5K0=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240820072028-6dd7be11d13b h1:ZCJBVmQDcdv0twpX9xJU/AQwX+dXyvVfqr0Pq3x+3yk=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240820072028-6dd7be11d13b/go.mod h1:aaC2OR34tVrBwd0Z2gqoN5WLtV/idKqpqPDhb4XqmCo=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
@@ -25,6 +25,8 @@ git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 h1:HeY8n27VyPRQe49l/fzyVMkWEB2fsLJYKp64pwA7tz4=
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
+github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go
index b79f6cb14..d979b4f0f 100644
--- a/pkg/local_object_storage/metabase/get.go
+++ b/pkg/local_object_storage/metabase/get.go
@@ -1,6 +1,7 @@
package meta
import (
+ "bytes"
"context"
"fmt"
"time"
@@ -107,7 +108,7 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b
// check in primary index
data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(data)
+ return obj, obj.Unmarshal(bytes.Clone(data))
}
data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key)
@@ -118,13 +119,13 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b
// if not found then check in tombstone index
data = getFromBucket(tx, tombstoneBucketName(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(data)
+ return obj, obj.Unmarshal(bytes.Clone(data))
}
// if not found then check in locker index
data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key)
if len(data) != 0 {
- return obj, obj.Unmarshal(data)
+ return obj, obj.Unmarshal(bytes.Clone(data))
}
// if not found then check if object is a virtual
diff --git a/pkg/services/control/common_test.go b/pkg/services/control/common_test.go
deleted file mode 100644
index bc512b4be..000000000
--- a/pkg/services/control/common_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package control_test
-
-import (
- "crypto/rand"
- "testing"
-
- "github.com/mr-tron/base58"
- "github.com/stretchr/testify/require"
- "google.golang.org/protobuf/proto"
-)
-
-type protoMessage interface {
- StableMarshal([]byte) []byte
- proto.Message
-}
-
-func testStableMarshal(t *testing.T, m1, m2 protoMessage, cmp func(m1, m2 protoMessage) bool) {
- require.NoError(t, proto.Unmarshal(m1.StableMarshal(nil), m2))
-
- require.True(t, cmp(m1, m2))
-}
-
-func testData(sz int) []byte {
- d := make([]byte, sz)
-
- _, _ = rand.Read(d)
-
- return d
-}
-
-func testString() string {
- return base58.Encode(testData(10))
-}
diff --git a/pkg/services/control/ir/service.go b/pkg/services/control/ir/service.go
deleted file mode 100644
index b2db2b43a..000000000
--- a/pkg/services/control/ir/service.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package control
-
-// SetBody sets health check request body.
-func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetHealthStatus sets health status of the IR application.
-func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
- if x != nil {
- x.HealthStatus = v
- }
-}
-
-// SetBody sets health check response body.
-func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-func (x *TickEpochRequest) SetBody(v *TickEpochRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-func (x *TickEpochResponse) SetBody(v *TickEpochResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-func (x *RemoveNodeRequest) SetBody(v *RemoveNodeRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-func (x *RemoveNodeResponse) SetBody(v *RemoveNodeResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
diff --git a/pkg/services/control/ir/service.pb.go b/pkg/services/control/ir/service.pb.go
deleted file mode 100644
index d1e253bf5..000000000
--- a/pkg/services/control/ir/service.pb.go
+++ /dev/null
@@ -1,1277 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.0
-// source: pkg/services/control/ir/service.proto
-
-package control
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Health check request.
-type HealthCheckRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of health check request message.
- Body *HealthCheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- // Should be signed by node key or one of
- // the keys configured by the node.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthCheckRequest) Reset() {
- *x = HealthCheckRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckRequest) ProtoMessage() {}
-
-func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
-func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthCheckRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Health check response.
-type HealthCheckResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of health check response message.
- Body *HealthCheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthCheckResponse) Reset() {
- *x = HealthCheckResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckResponse) ProtoMessage() {}
-
-func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
-func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthCheckResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type TickEpochRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *TickEpochRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *TickEpochRequest) Reset() {
- *x = TickEpochRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TickEpochRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TickEpochRequest) ProtoMessage() {}
-
-func (x *TickEpochRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TickEpochRequest.ProtoReflect.Descriptor instead.
-func (*TickEpochRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *TickEpochRequest) GetBody() *TickEpochRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *TickEpochRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type TickEpochResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *TickEpochResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *TickEpochResponse) Reset() {
- *x = TickEpochResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TickEpochResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TickEpochResponse) ProtoMessage() {}
-
-func (x *TickEpochResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TickEpochResponse.ProtoReflect.Descriptor instead.
-func (*TickEpochResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *TickEpochResponse) GetBody() *TickEpochResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *TickEpochResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveNodeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *RemoveNodeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveNodeRequest) Reset() {
- *x = RemoveNodeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveNodeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveNodeRequest) ProtoMessage() {}
-
-func (x *RemoveNodeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveNodeRequest.ProtoReflect.Descriptor instead.
-func (*RemoveNodeRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *RemoveNodeRequest) GetBody() *RemoveNodeRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveNodeRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveNodeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *RemoveNodeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveNodeResponse) Reset() {
- *x = RemoveNodeResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveNodeResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveNodeResponse) ProtoMessage() {}
-
-func (x *RemoveNodeResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveNodeResponse.ProtoReflect.Descriptor instead.
-func (*RemoveNodeResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *RemoveNodeResponse) GetBody() *RemoveNodeResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveNodeResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveContainerRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *RemoveContainerRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveContainerRequest) Reset() {
- *x = RemoveContainerRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveContainerRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveContainerRequest) ProtoMessage() {}
-
-func (x *RemoveContainerRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveContainerRequest.ProtoReflect.Descriptor instead.
-func (*RemoveContainerRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *RemoveContainerRequest) GetBody() *RemoveContainerRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveContainerRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveContainerResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *RemoveContainerResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveContainerResponse) Reset() {
- *x = RemoveContainerResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveContainerResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveContainerResponse) ProtoMessage() {}
-
-func (x *RemoveContainerResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveContainerResponse.ProtoReflect.Descriptor instead.
-func (*RemoveContainerResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *RemoveContainerResponse) GetBody() *RemoveContainerResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveContainerResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Health check request body.
-type HealthCheckRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *HealthCheckRequest_Body) Reset() {
- *x = HealthCheckRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckRequest_Body) ProtoMessage() {}
-
-func (x *HealthCheckRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckRequest_Body.ProtoReflect.Descriptor instead.
-func (*HealthCheckRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{0, 0}
-}
-
-// Health check response body
-type HealthCheckResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Health status of IR node application.
- HealthStatus HealthStatus `protobuf:"varint,1,opt,name=health_status,json=healthStatus,proto3,enum=ircontrol.HealthStatus" json:"health_status,omitempty"`
-}
-
-func (x *HealthCheckResponse_Body) Reset() {
- *x = HealthCheckResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckResponse_Body) ProtoMessage() {}
-
-func (x *HealthCheckResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckResponse_Body.ProtoReflect.Descriptor instead.
-func (*HealthCheckResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{1, 0}
-}
-
-func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
- if x != nil {
- return x.HealthStatus
- }
- return HealthStatus_HEALTH_STATUS_UNDEFINED
-}
-
-type TickEpochRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Valid until block value override.
- Vub uint32 `protobuf:"varint,1,opt,name=vub,proto3" json:"vub,omitempty"`
-}
-
-func (x *TickEpochRequest_Body) Reset() {
- *x = TickEpochRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TickEpochRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TickEpochRequest_Body) ProtoMessage() {}
-
-func (x *TickEpochRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TickEpochRequest_Body.ProtoReflect.Descriptor instead.
-func (*TickEpochRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{2, 0}
-}
-
-func (x *TickEpochRequest_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-
-type TickEpochResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Valid until block value for transaction.
- Vub uint32 `protobuf:"varint,1,opt,name=vub,proto3" json:"vub,omitempty"`
-}
-
-func (x *TickEpochResponse_Body) Reset() {
- *x = TickEpochResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TickEpochResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TickEpochResponse_Body) ProtoMessage() {}
-
-func (x *TickEpochResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TickEpochResponse_Body.ProtoReflect.Descriptor instead.
-func (*TickEpochResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{3, 0}
-}
-
-func (x *TickEpochResponse_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-
-type RemoveNodeRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // Valid until block value override.
- Vub uint32 `protobuf:"varint,2,opt,name=vub,proto3" json:"vub,omitempty"`
-}
-
-func (x *RemoveNodeRequest_Body) Reset() {
- *x = RemoveNodeRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveNodeRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveNodeRequest_Body) ProtoMessage() {}
-
-func (x *RemoveNodeRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveNodeRequest_Body.ProtoReflect.Descriptor instead.
-func (*RemoveNodeRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{4, 0}
-}
-
-func (x *RemoveNodeRequest_Body) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-
-func (x *RemoveNodeRequest_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-
-type RemoveNodeResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Valid until block value for transaction.
- Vub uint32 `protobuf:"varint,1,opt,name=vub,proto3" json:"vub,omitempty"`
-}
-
-func (x *RemoveNodeResponse_Body) Reset() {
- *x = RemoveNodeResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveNodeResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveNodeResponse_Body) ProtoMessage() {}
-
-func (x *RemoveNodeResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveNodeResponse_Body.ProtoReflect.Descriptor instead.
-func (*RemoveNodeResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{5, 0}
-}
-
-func (x *RemoveNodeResponse_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-
-type RemoveContainerRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- Owner []byte `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"`
- // Valid until block value override.
- Vub uint32 `protobuf:"varint,3,opt,name=vub,proto3" json:"vub,omitempty"`
-}
-
-func (x *RemoveContainerRequest_Body) Reset() {
- *x = RemoveContainerRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveContainerRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveContainerRequest_Body) ProtoMessage() {}
-
-func (x *RemoveContainerRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveContainerRequest_Body.ProtoReflect.Descriptor instead.
-func (*RemoveContainerRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{6, 0}
-}
-
-func (x *RemoveContainerRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *RemoveContainerRequest_Body) GetOwner() []byte {
- if x != nil {
- return x.Owner
- }
- return nil
-}
-
-func (x *RemoveContainerRequest_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-
-type RemoveContainerResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Valid until block value for transaction.
- Vub uint32 `protobuf:"varint,1,opt,name=vub,proto3" json:"vub,omitempty"`
-}
-
-func (x *RemoveContainerResponse_Body) Reset() {
- *x = RemoveContainerResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveContainerResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveContainerResponse_Body) ProtoMessage() {}
-
-func (x *RemoveContainerResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_service_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveContainerResponse_Body.ProtoReflect.Descriptor instead.
-func (*RemoveContainerResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_service_proto_rawDescGZIP(), []int{7, 0}
-}
-
-func (x *RemoveContainerResponse_Body) GetVub() uint32 {
- if x != nil {
- return x.Vub
- }
- return 0
-}
-
-var File_pkg_services_control_ir_service_proto protoreflect.FileDescriptor
-
-var file_pkg_services_control_ir_service_proto_rawDesc = []byte{
- 0x0a, 0x25, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x1a, 0x23, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
- 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x74, 0x79, 0x70, 0x65,
- 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x88, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c,
- 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69,
- 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
- 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79,
- 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f,
- 0x64, 0x79, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x44, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
- 0x3c, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
- 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x96, 0x01,
- 0x0a, 0x10, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x20, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69, 0x63,
- 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
- 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x18, 0x0a, 0x04,
- 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x75, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0d, 0x52, 0x03, 0x76, 0x75, 0x62, 0x22, 0x98, 0x01, 0x0a, 0x11, 0x54, 0x69, 0x63, 0x6b, 0x45,
- 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x72, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x18, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
- 0x10, 0x0a, 0x03, 0x76, 0x75, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x76, 0x75,
- 0x62, 0x22, 0xaa, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32,
- 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x1a, 0x2a, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x76, 0x75, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x76, 0x75, 0x62, 0x22, 0x9a,
- 0x01, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x18, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x75, 0x62,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x76, 0x75, 0x62, 0x22, 0xdb, 0x01, 0x0a, 0x16,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x51, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21,
- 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49,
- 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x75, 0x62, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x76, 0x75, 0x62, 0x22, 0xa4, 0x01, 0x0a, 0x17, 0x52, 0x65,
- 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x32, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x18, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10,
- 0x0a, 0x03, 0x76, 0x75, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x76, 0x75, 0x62,
- 0x32, 0xcb, 0x02, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x12, 0x1d, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x1e, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65,
- 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x46, 0x0a, 0x09, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x1b,
- 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x45,
- 0x70, 0x6f, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x69, 0x72,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x45, 0x70, 0x6f, 0x63,
- 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x52, 0x65, 0x6d,
- 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f,
- 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x21, 0x2e, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69,
- 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x69, 0x72, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e,
- 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x44,
- 0x5a, 0x42, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e,
- 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f,
- 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67,
- 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_pkg_services_control_ir_service_proto_rawDescOnce sync.Once
- file_pkg_services_control_ir_service_proto_rawDescData = file_pkg_services_control_ir_service_proto_rawDesc
-)
-
-func file_pkg_services_control_ir_service_proto_rawDescGZIP() []byte {
- file_pkg_services_control_ir_service_proto_rawDescOnce.Do(func() {
- file_pkg_services_control_ir_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_ir_service_proto_rawDescData)
- })
- return file_pkg_services_control_ir_service_proto_rawDescData
-}
-
-var file_pkg_services_control_ir_service_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
-var file_pkg_services_control_ir_service_proto_goTypes = []interface{}{
- (*HealthCheckRequest)(nil), // 0: ircontrol.HealthCheckRequest
- (*HealthCheckResponse)(nil), // 1: ircontrol.HealthCheckResponse
- (*TickEpochRequest)(nil), // 2: ircontrol.TickEpochRequest
- (*TickEpochResponse)(nil), // 3: ircontrol.TickEpochResponse
- (*RemoveNodeRequest)(nil), // 4: ircontrol.RemoveNodeRequest
- (*RemoveNodeResponse)(nil), // 5: ircontrol.RemoveNodeResponse
- (*RemoveContainerRequest)(nil), // 6: ircontrol.RemoveContainerRequest
- (*RemoveContainerResponse)(nil), // 7: ircontrol.RemoveContainerResponse
- (*HealthCheckRequest_Body)(nil), // 8: ircontrol.HealthCheckRequest.Body
- (*HealthCheckResponse_Body)(nil), // 9: ircontrol.HealthCheckResponse.Body
- (*TickEpochRequest_Body)(nil), // 10: ircontrol.TickEpochRequest.Body
- (*TickEpochResponse_Body)(nil), // 11: ircontrol.TickEpochResponse.Body
- (*RemoveNodeRequest_Body)(nil), // 12: ircontrol.RemoveNodeRequest.Body
- (*RemoveNodeResponse_Body)(nil), // 13: ircontrol.RemoveNodeResponse.Body
- (*RemoveContainerRequest_Body)(nil), // 14: ircontrol.RemoveContainerRequest.Body
- (*RemoveContainerResponse_Body)(nil), // 15: ircontrol.RemoveContainerResponse.Body
- (*Signature)(nil), // 16: ircontrol.Signature
- (HealthStatus)(0), // 17: ircontrol.HealthStatus
-}
-var file_pkg_services_control_ir_service_proto_depIdxs = []int32{
- 8, // 0: ircontrol.HealthCheckRequest.body:type_name -> ircontrol.HealthCheckRequest.Body
- 16, // 1: ircontrol.HealthCheckRequest.signature:type_name -> ircontrol.Signature
- 9, // 2: ircontrol.HealthCheckResponse.body:type_name -> ircontrol.HealthCheckResponse.Body
- 16, // 3: ircontrol.HealthCheckResponse.signature:type_name -> ircontrol.Signature
- 10, // 4: ircontrol.TickEpochRequest.body:type_name -> ircontrol.TickEpochRequest.Body
- 16, // 5: ircontrol.TickEpochRequest.signature:type_name -> ircontrol.Signature
- 11, // 6: ircontrol.TickEpochResponse.body:type_name -> ircontrol.TickEpochResponse.Body
- 16, // 7: ircontrol.TickEpochResponse.signature:type_name -> ircontrol.Signature
- 12, // 8: ircontrol.RemoveNodeRequest.body:type_name -> ircontrol.RemoveNodeRequest.Body
- 16, // 9: ircontrol.RemoveNodeRequest.signature:type_name -> ircontrol.Signature
- 13, // 10: ircontrol.RemoveNodeResponse.body:type_name -> ircontrol.RemoveNodeResponse.Body
- 16, // 11: ircontrol.RemoveNodeResponse.signature:type_name -> ircontrol.Signature
- 14, // 12: ircontrol.RemoveContainerRequest.body:type_name -> ircontrol.RemoveContainerRequest.Body
- 16, // 13: ircontrol.RemoveContainerRequest.signature:type_name -> ircontrol.Signature
- 15, // 14: ircontrol.RemoveContainerResponse.body:type_name -> ircontrol.RemoveContainerResponse.Body
- 16, // 15: ircontrol.RemoveContainerResponse.signature:type_name -> ircontrol.Signature
- 17, // 16: ircontrol.HealthCheckResponse.Body.health_status:type_name -> ircontrol.HealthStatus
- 0, // 17: ircontrol.ControlService.HealthCheck:input_type -> ircontrol.HealthCheckRequest
- 2, // 18: ircontrol.ControlService.TickEpoch:input_type -> ircontrol.TickEpochRequest
- 4, // 19: ircontrol.ControlService.RemoveNode:input_type -> ircontrol.RemoveNodeRequest
- 6, // 20: ircontrol.ControlService.RemoveContainer:input_type -> ircontrol.RemoveContainerRequest
- 1, // 21: ircontrol.ControlService.HealthCheck:output_type -> ircontrol.HealthCheckResponse
- 3, // 22: ircontrol.ControlService.TickEpoch:output_type -> ircontrol.TickEpochResponse
- 5, // 23: ircontrol.ControlService.RemoveNode:output_type -> ircontrol.RemoveNodeResponse
- 7, // 24: ircontrol.ControlService.RemoveContainer:output_type -> ircontrol.RemoveContainerResponse
- 21, // [21:25] is the sub-list for method output_type
- 17, // [17:21] is the sub-list for method input_type
- 17, // [17:17] is the sub-list for extension type_name
- 17, // [17:17] is the sub-list for extension extendee
- 0, // [0:17] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_control_ir_service_proto_init() }
-func file_pkg_services_control_ir_service_proto_init() {
- if File_pkg_services_control_ir_service_proto != nil {
- return
- }
- file_pkg_services_control_ir_types_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_control_ir_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TickEpochRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TickEpochResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveNodeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveNodeResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveContainerRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveContainerResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TickEpochRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TickEpochResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveNodeRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveNodeResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveContainerRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_ir_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveContainerResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_control_ir_service_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 16,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_pkg_services_control_ir_service_proto_goTypes,
- DependencyIndexes: file_pkg_services_control_ir_service_proto_depIdxs,
- MessageInfos: file_pkg_services_control_ir_service_proto_msgTypes,
- }.Build()
- File_pkg_services_control_ir_service_proto = out.File
- file_pkg_services_control_ir_service_proto_rawDesc = nil
- file_pkg_services_control_ir_service_proto_goTypes = nil
- file_pkg_services_control_ir_service_proto_depIdxs = nil
-}
diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go
index e22d0013f..786095802 100644
--- a/pkg/services/control/ir/service_frostfs.pb.go
+++ b/pkg/services/control/ir/service_frostfs.pb.go
@@ -2,7 +2,27 @@
package control
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+import (
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+ strconv "strconv"
+)
+
+type HealthCheckRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckRequest_Body)(nil)
+ _ json.Marshaler = (*HealthCheckRequest_Body)(nil)
+ _ json.Unmarshaler = (*HealthCheckRequest_Body)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
@@ -14,18 +34,93 @@ func (x *HealthCheckRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckRequest struct {
+ Body *HealthCheckRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckRequest)(nil)
+ _ json.Marshaler = (*HealthCheckRequest)(nil)
+ _ json.Unmarshaler = (*HealthCheckRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -38,27 +133,6 @@ func (x *HealthCheckRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -74,13 +148,164 @@ func (x *HealthCheckRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthCheckRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthCheckRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
+ x.Body = v
+}
+func (x *HealthCheckRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthCheckRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthCheckRequest_Body
+ f = new(HealthCheckRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckResponse_Body struct {
+ HealthStatus HealthStatus `json:"healthStatus"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckResponse_Body)(nil)
+ _ json.Marshaler = (*HealthCheckResponse_Body)(nil)
+ _ json.Unmarshaler = (*HealthCheckResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -92,26 +317,141 @@ func (x *HealthCheckResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.EnumMarshal(1, buf[offset:], int32(x.HealthStatus))
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.HealthStatus) != 0 {
+ mm.AppendInt32(1, int32(x.HealthStatus))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // HealthStatus
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "HealthStatus")
+ }
+ x.HealthStatus = HealthStatus(data)
+ }
+ }
+ return nil
+}
+func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
+ if x != nil {
+ return x.HealthStatus
+ }
+ return 0
+}
+func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
+ x.HealthStatus = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"healthStatus\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.HealthStatus))
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "healthStatus":
+ {
+ var f HealthStatus
+ var parsedValue HealthStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := HealthStatus_value[v]; ok {
+ parsedValue = HealthStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = HealthStatus(vv)
+ case float64:
+ parsedValue = HealthStatus(v)
+ }
+ f = parsedValue
+ x.HealthStatus = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckResponse struct {
+ Body *HealthCheckResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckResponse)(nil)
+ _ json.Marshaler = (*HealthCheckResponse)(nil)
+ _ json.Unmarshaler = (*HealthCheckResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -124,27 +464,6 @@ func (x *HealthCheckResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -160,13 +479,164 @@ func (x *HealthCheckResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthCheckResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthCheckResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
+ x.Body = v
+}
+func (x *HealthCheckResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthCheckResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthCheckResponse_Body
+ f = new(HealthCheckResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TickEpochRequest_Body struct {
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TickEpochRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*TickEpochRequest_Body)(nil)
+ _ json.Marshaler = (*TickEpochRequest_Body)(nil)
+ _ json.Unmarshaler = (*TickEpochRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -178,26 +648,125 @@ func (x *TickEpochRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TickEpochRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt32Marshal(1, buf[offset:], x.Vub)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TickEpochRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TickEpochRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(1, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TickEpochRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TickEpochRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *TickEpochRequest_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *TickEpochRequest_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TickEpochRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TickEpochRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"vub\":"
+ out.RawString(prefix[1:])
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TickEpochRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TickEpochRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "vub":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TickEpochRequest struct {
+ Body *TickEpochRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TickEpochRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*TickEpochRequest)(nil)
+ _ json.Marshaler = (*TickEpochRequest)(nil)
+ _ json.Unmarshaler = (*TickEpochRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -210,27 +779,6 @@ func (x *TickEpochRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TickEpochRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -246,13 +794,164 @@ func (x *TickEpochRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *TickEpochRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *TickEpochRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TickEpochRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TickEpochRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TickEpochRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TickEpochRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(TickEpochRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *TickEpochRequest) GetBody() *TickEpochRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *TickEpochRequest) SetBody(v *TickEpochRequest_Body) {
+ x.Body = v
+}
+func (x *TickEpochRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *TickEpochRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TickEpochRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TickEpochRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TickEpochRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TickEpochRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *TickEpochRequest_Body
+ f = new(TickEpochRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TickEpochResponse_Body struct {
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TickEpochResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*TickEpochResponse_Body)(nil)
+ _ json.Marshaler = (*TickEpochResponse_Body)(nil)
+ _ json.Unmarshaler = (*TickEpochResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -264,26 +963,125 @@ func (x *TickEpochResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TickEpochResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt32Marshal(1, buf[offset:], x.Vub)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TickEpochResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TickEpochResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(1, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TickEpochResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TickEpochResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *TickEpochResponse_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *TickEpochResponse_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TickEpochResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TickEpochResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"vub\":"
+ out.RawString(prefix[1:])
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TickEpochResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TickEpochResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "vub":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TickEpochResponse struct {
+ Body *TickEpochResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TickEpochResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*TickEpochResponse)(nil)
+ _ json.Marshaler = (*TickEpochResponse)(nil)
+ _ json.Unmarshaler = (*TickEpochResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -296,27 +1094,6 @@ func (x *TickEpochResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TickEpochResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -332,13 +1109,165 @@ func (x *TickEpochResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *TickEpochResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *TickEpochResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TickEpochResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TickEpochResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TickEpochResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TickEpochResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(TickEpochResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *TickEpochResponse) GetBody() *TickEpochResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *TickEpochResponse) SetBody(v *TickEpochResponse_Body) {
+ x.Body = v
+}
+func (x *TickEpochResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *TickEpochResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TickEpochResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TickEpochResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TickEpochResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TickEpochResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *TickEpochResponse_Body
+ f = new(TickEpochResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveNodeRequest_Body struct {
+ Key []byte `json:"key"`
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveNodeRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveNodeRequest_Body)(nil)
+ _ json.Marshaler = (*RemoveNodeRequest_Body)(nil)
+ _ json.Unmarshaler = (*RemoveNodeRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -351,27 +1280,154 @@ func (x *RemoveNodeRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveNodeRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Key)
- offset += proto.UInt32Marshal(2, buf[offset:], x.Vub)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveNodeRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveNodeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendBytes(1, x.Key)
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(2, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveNodeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveNodeRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *RemoveNodeRequest_Body) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+func (x *RemoveNodeRequest_Body) SetKey(v []byte) {
+ x.Key = v
+}
+func (x *RemoveNodeRequest_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *RemoveNodeRequest_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveNodeRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveNodeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Key)
+ }
+ {
+ const prefix string = ",\"vub\":"
+ out.RawString(prefix)
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveNodeRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveNodeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Key = f
+ }
+ case "vub":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveNodeRequest struct {
+ Body *RemoveNodeRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveNodeRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveNodeRequest)(nil)
+ _ json.Marshaler = (*RemoveNodeRequest)(nil)
+ _ json.Unmarshaler = (*RemoveNodeRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -384,27 +1440,6 @@ func (x *RemoveNodeRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveNodeRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -420,13 +1455,164 @@ func (x *RemoveNodeRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveNodeRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *RemoveNodeRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveNodeRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveNodeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveNodeRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveNodeRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveNodeRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveNodeRequest) GetBody() *RemoveNodeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveNodeRequest) SetBody(v *RemoveNodeRequest_Body) {
+ x.Body = v
+}
+func (x *RemoveNodeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveNodeRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveNodeRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveNodeRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveNodeRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveNodeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveNodeRequest_Body
+ f = new(RemoveNodeRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveNodeResponse_Body struct {
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveNodeResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveNodeResponse_Body)(nil)
+ _ json.Marshaler = (*RemoveNodeResponse_Body)(nil)
+ _ json.Unmarshaler = (*RemoveNodeResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -438,26 +1624,125 @@ func (x *RemoveNodeResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveNodeResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt32Marshal(1, buf[offset:], x.Vub)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveNodeResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveNodeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(1, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveNodeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveNodeResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *RemoveNodeResponse_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *RemoveNodeResponse_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveNodeResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveNodeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"vub\":"
+ out.RawString(prefix[1:])
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveNodeResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveNodeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "vub":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveNodeResponse struct {
+ Body *RemoveNodeResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveNodeResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveNodeResponse)(nil)
+ _ json.Marshaler = (*RemoveNodeResponse)(nil)
+ _ json.Unmarshaler = (*RemoveNodeResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -470,27 +1755,6 @@ func (x *RemoveNodeResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveNodeResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -506,13 +1770,166 @@ func (x *RemoveNodeResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveNodeResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *RemoveNodeResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveNodeResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveNodeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveNodeResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveNodeResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveNodeResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveNodeResponse) GetBody() *RemoveNodeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveNodeResponse) SetBody(v *RemoveNodeResponse_Body) {
+ x.Body = v
+}
+func (x *RemoveNodeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveNodeResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveNodeResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveNodeResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveNodeResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveNodeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveNodeResponse_Body
+ f = new(RemoveNodeResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveContainerRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ Owner []byte `json:"owner"`
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveContainerRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveContainerRequest_Body)(nil)
+ _ json.Marshaler = (*RemoveContainerRequest_Body)(nil)
+ _ json.Unmarshaler = (*RemoveContainerRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -526,28 +1943,183 @@ func (x *RemoveContainerRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveContainerRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.BytesMarshal(2, buf[offset:], x.Owner)
- offset += proto.UInt32Marshal(3, buf[offset:], x.Vub)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveContainerRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveContainerRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.Owner) != 0 {
+ mm.AppendBytes(2, x.Owner)
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(3, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveContainerRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveContainerRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // Owner
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Owner")
+ }
+ x.Owner = data
+ case 3: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *RemoveContainerRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *RemoveContainerRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *RemoveContainerRequest_Body) GetOwner() []byte {
+ if x != nil {
+ return x.Owner
+ }
+ return nil
+}
+func (x *RemoveContainerRequest_Body) SetOwner(v []byte) {
+ x.Owner = v
+}
+func (x *RemoveContainerRequest_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *RemoveContainerRequest_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveContainerRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveContainerRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
+ }
+ {
+ const prefix string = ",\"owner\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.Owner)
+ }
+ {
+ const prefix string = ",\"vub\":"
+ out.RawString(prefix)
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveContainerRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveContainerRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ContainerId = f
+ }
+ case "owner":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Owner = f
+ }
+ case "vub":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveContainerRequest struct {
+ Body *RemoveContainerRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveContainerRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveContainerRequest)(nil)
+ _ json.Marshaler = (*RemoveContainerRequest)(nil)
+ _ json.Unmarshaler = (*RemoveContainerRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -560,27 +2132,6 @@ func (x *RemoveContainerRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveContainerRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -596,13 +2147,164 @@ func (x *RemoveContainerRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveContainerRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *RemoveContainerRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveContainerRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveContainerRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveContainerRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveContainerRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveContainerRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveContainerRequest) GetBody() *RemoveContainerRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveContainerRequest) SetBody(v *RemoveContainerRequest_Body) {
+ x.Body = v
+}
+func (x *RemoveContainerRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveContainerRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveContainerRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveContainerRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveContainerRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveContainerRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveContainerRequest_Body
+ f = new(RemoveContainerRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveContainerResponse_Body struct {
+ Vub uint32 `json:"vub"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveContainerResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveContainerResponse_Body)(nil)
+ _ json.Marshaler = (*RemoveContainerResponse_Body)(nil)
+ _ json.Unmarshaler = (*RemoveContainerResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -614,26 +2316,125 @@ func (x *RemoveContainerResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveContainerResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt32Marshal(1, buf[offset:], x.Vub)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveContainerResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveContainerResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Vub != 0 {
+ mm.AppendUint32(1, x.Vub)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveContainerResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveContainerResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Vub
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Vub")
+ }
+ x.Vub = data
+ }
+ }
+ return nil
+}
+func (x *RemoveContainerResponse_Body) GetVub() uint32 {
+ if x != nil {
+ return x.Vub
+ }
+ return 0
+}
+func (x *RemoveContainerResponse_Body) SetVub(v uint32) {
+ x.Vub = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveContainerResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveContainerResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"vub\":"
+ out.RawString(prefix[1:])
+ out.Uint32(x.Vub)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveContainerResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveContainerResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "vub":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.Vub = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveContainerResponse struct {
+ Body *RemoveContainerResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveContainerResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveContainerResponse)(nil)
+ _ json.Marshaler = (*RemoveContainerResponse)(nil)
+ _ json.Unmarshaler = (*RemoveContainerResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -646,27 +2447,6 @@ func (x *RemoveContainerResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveContainerResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -682,9 +2462,149 @@ func (x *RemoveContainerResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveContainerResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *RemoveContainerResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveContainerResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *RemoveContainerResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveContainerResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveContainerResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveContainerResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveContainerResponse) GetBody() *RemoveContainerResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveContainerResponse) SetBody(v *RemoveContainerResponse_Body) {
+ x.Body = v
+}
+func (x *RemoveContainerResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveContainerResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveContainerResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveContainerResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveContainerResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveContainerResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveContainerResponse_Body
+ f = new(RemoveContainerResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
diff --git a/pkg/services/control/ir/service_test.go b/pkg/services/control/ir/service_test.go
deleted file mode 100644
index 54eef5148..000000000
--- a/pkg/services/control/ir/service_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package control_test
-
-import (
- "testing"
-
- control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
- "github.com/stretchr/testify/require"
- "google.golang.org/protobuf/proto"
-)
-
-type protoMessage interface {
- StableMarshal([]byte) []byte
- proto.Message
-}
-
-func testStableMarshal(t *testing.T, m1, m2 protoMessage, cmp func(m1, m2 protoMessage) bool) {
- require.NoError(t, proto.Unmarshal(m1.StableMarshal(nil), m2))
-
- require.True(t, cmp(m1, m2))
-}
-
-func TestHealthCheckResponse_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- generateHealthCheckResponseBody(),
- new(control.HealthCheckResponse_Body),
- func(m1, m2 protoMessage) bool {
- return equalHealthCheckResponseBodies(
- m1.(*control.HealthCheckResponse_Body),
- m2.(*control.HealthCheckResponse_Body),
- )
- },
- )
-}
-
-func generateHealthCheckResponseBody() *control.HealthCheckResponse_Body {
- body := new(control.HealthCheckResponse_Body)
- body.SetHealthStatus(control.HealthStatus_SHUTTING_DOWN)
-
- return body
-}
-
-func equalHealthCheckResponseBodies(b1, b2 *control.HealthCheckResponse_Body) bool {
- return b1.GetHealthStatus() == b2.GetHealthStatus()
-}
diff --git a/pkg/services/control/ir/types.go b/pkg/services/control/ir/types.go
deleted file mode 100644
index 97ffd3ce3..000000000
--- a/pkg/services/control/ir/types.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package control
-
-// SetKey sets public key used for signing.
-func (x *Signature) SetKey(v []byte) {
- if x != nil {
- x.Key = v
- }
-}
-
-// SetSign sets binary signature.
-func (x *Signature) SetSign(v []byte) {
- if x != nil {
- x.Sign = v
- }
-}
diff --git a/pkg/services/control/ir/types.pb.go b/pkg/services/control/ir/types.pb.go
deleted file mode 100644
index 840e0be67..000000000
--- a/pkg/services/control/ir/types.pb.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.0
-// source: pkg/services/control/ir/types.proto
-
-package control
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Health status of the IR application.
-type HealthStatus int32
-
-const (
- // Undefined status, default value.
- HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
- // IR application is starting.
- HealthStatus_STARTING HealthStatus = 1
- // IR application is started and serves all services.
- HealthStatus_READY HealthStatus = 2
- // IR application is shutting down.
- HealthStatus_SHUTTING_DOWN HealthStatus = 3
- // IR application is reconfiguring.
- HealthStatus_RECONFIGURING HealthStatus = 4
-)
-
-// Enum value maps for HealthStatus.
-var (
- HealthStatus_name = map[int32]string{
- 0: "HEALTH_STATUS_UNDEFINED",
- 1: "STARTING",
- 2: "READY",
- 3: "SHUTTING_DOWN",
- 4: "RECONFIGURING",
- }
- HealthStatus_value = map[string]int32{
- "HEALTH_STATUS_UNDEFINED": 0,
- "STARTING": 1,
- "READY": 2,
- "SHUTTING_DOWN": 3,
- "RECONFIGURING": 4,
- }
-)
-
-func (x HealthStatus) Enum() *HealthStatus {
- p := new(HealthStatus)
- *p = x
- return p
-}
-
-func (x HealthStatus) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (HealthStatus) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_control_ir_types_proto_enumTypes[0].Descriptor()
-}
-
-func (HealthStatus) Type() protoreflect.EnumType {
- return &file_pkg_services_control_ir_types_proto_enumTypes[0]
-}
-
-func (x HealthStatus) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use HealthStatus.Descriptor instead.
-func (HealthStatus) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_types_proto_rawDescGZIP(), []int{0}
-}
-
-// Signature of some message.
-type Signature struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Public key used for signing.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // Binary signature.
- Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"`
-}
-
-func (x *Signature) Reset() {
- *x = Signature{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_ir_types_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Signature) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Signature) ProtoMessage() {}
-
-func (x *Signature) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_ir_types_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
-func (*Signature) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_ir_types_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Signature) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-
-func (x *Signature) GetSign() []byte {
- if x != nil {
- return x.Sign
- }
- return nil
-}
-
-var File_pkg_services_control_ir_types_proto protoreflect.FileDescriptor
-
-var file_pkg_services_control_ir_types_proto_rawDesc = []byte{
- 0x0a, 0x23, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x69, 0x72, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x69, 0x72, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x22, 0x36, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x17, 0x0a, 0x04, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2a, 0x6a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c,
- 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x17, 0x48, 0x45, 0x41, 0x4c,
- 0x54, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49,
- 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e,
- 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x12, 0x11,
- 0x0a, 0x0d, 0x53, 0x48, 0x55, 0x54, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x10,
- 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49,
- 0x4e, 0x47, 0x10, 0x04, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73,
- 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f,
- 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f,
- 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
- 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
-}
-
-var (
- file_pkg_services_control_ir_types_proto_rawDescOnce sync.Once
- file_pkg_services_control_ir_types_proto_rawDescData = file_pkg_services_control_ir_types_proto_rawDesc
-)
-
-func file_pkg_services_control_ir_types_proto_rawDescGZIP() []byte {
- file_pkg_services_control_ir_types_proto_rawDescOnce.Do(func() {
- file_pkg_services_control_ir_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_ir_types_proto_rawDescData)
- })
- return file_pkg_services_control_ir_types_proto_rawDescData
-}
-
-var file_pkg_services_control_ir_types_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_pkg_services_control_ir_types_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_pkg_services_control_ir_types_proto_goTypes = []interface{}{
- (HealthStatus)(0), // 0: ircontrol.HealthStatus
- (*Signature)(nil), // 1: ircontrol.Signature
-}
-var file_pkg_services_control_ir_types_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_control_ir_types_proto_init() }
-func file_pkg_services_control_ir_types_proto_init() {
- if File_pkg_services_control_ir_types_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_control_ir_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Signature); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_control_ir_types_proto_rawDesc,
- NumEnums: 1,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_pkg_services_control_ir_types_proto_goTypes,
- DependencyIndexes: file_pkg_services_control_ir_types_proto_depIdxs,
- EnumInfos: file_pkg_services_control_ir_types_proto_enumTypes,
- MessageInfos: file_pkg_services_control_ir_types_proto_msgTypes,
- }.Build()
- File_pkg_services_control_ir_types_proto = out.File
- file_pkg_services_control_ir_types_proto_rawDesc = nil
- file_pkg_services_control_ir_types_proto_goTypes = nil
- file_pkg_services_control_ir_types_proto_depIdxs = nil
-}
diff --git a/pkg/services/control/ir/types_frostfs.pb.go b/pkg/services/control/ir/types_frostfs.pb.go
index ef2fc458e..b230726a9 100644
--- a/pkg/services/control/ir/types_frostfs.pb.go
+++ b/pkg/services/control/ir/types_frostfs.pb.go
@@ -2,7 +2,70 @@
package control
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+import (
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+ strconv "strconv"
+)
+
+type HealthStatus int32
+
+const (
+ HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
+ HealthStatus_STARTING HealthStatus = 1
+ HealthStatus_READY HealthStatus = 2
+ HealthStatus_SHUTTING_DOWN HealthStatus = 3
+ HealthStatus_RECONFIGURING HealthStatus = 4
+)
+
+var (
+ HealthStatus_name = map[int32]string{
+ 0: "HEALTH_STATUS_UNDEFINED",
+ 1: "STARTING",
+ 2: "READY",
+ 3: "SHUTTING_DOWN",
+ 4: "RECONFIGURING",
+ }
+ HealthStatus_value = map[string]int32{
+ "HEALTH_STATUS_UNDEFINED": 0,
+ "STARTING": 1,
+ "READY": 2,
+ "SHUTTING_DOWN": 3,
+ "RECONFIGURING": 4,
+ }
+)
+
+func (x HealthStatus) String() string {
+ if v, ok := HealthStatus_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *HealthStatus) FromString(s string) bool {
+ if v, ok := HealthStatus_value[s]; ok {
+ *x = HealthStatus(v)
+ return true
+ }
+ return false
+}
+
+type Signature struct {
+ Key []byte `json:"key"`
+ Sign []byte `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*Signature)(nil)
+ _ encoding.ProtoUnmarshaler = (*Signature)(nil)
+ _ json.Marshaler = (*Signature)(nil)
+ _ json.Unmarshaler = (*Signature)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
@@ -16,23 +79,138 @@ func (x *Signature) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *Signature) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Key)
- offset += proto.BytesMarshal(2, buf[offset:], x.Sign)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *Signature) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendBytes(1, x.Key)
+ }
+ if len(x.Sign) != 0 {
+ mm.AppendBytes(2, x.Sign)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *Signature) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "Signature")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Sign
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Sign")
+ }
+ x.Sign = data
+ }
+ }
+ return nil
+}
+func (x *Signature) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+func (x *Signature) SetKey(v []byte) {
+ x.Key = v
+}
+func (x *Signature) GetSign() []byte {
+ if x != nil {
+ return x.Sign
+ }
+ return nil
+}
+func (x *Signature) SetSign(v []byte) {
+ x.Sign = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *Signature) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Key)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.Sign)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *Signature) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Key = f
+ }
+ case "signature":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Sign = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
diff --git a/pkg/services/control/server/list_shards.go b/pkg/services/control/server/list_shards.go
index d6531b947..b639245c1 100644
--- a/pkg/services/control/server/list_shards.go
+++ b/pkg/services/control/server/list_shards.go
@@ -30,10 +30,10 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
for _, sh := range info.Shards {
si := new(control.ShardInfo)
- si.SetID(*sh.ID)
+ si.SetShard_ID(*sh.ID)
si.SetMetabasePath(sh.MetaBaseInfo.Path)
si.Blobstor = blobstorInfoToProto(sh.BlobStorInfo)
- si.SetWriteCachePath(sh.WriteCacheInfo.Path)
+ si.SetWritecachePath(sh.WriteCacheInfo.Path)
si.SetPiloramaPath(sh.PiloramaInfo.Path)
var m control.ShardMode
diff --git a/pkg/services/control/service.go b/pkg/services/control/service.go
deleted file mode 100644
index ef0c0a8d2..000000000
--- a/pkg/services/control/service.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package control
-
-// SetBody sets health check request body.
-func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetNetmapStatus sets status of the storage node in FrostFS network map.
-func (x *HealthCheckResponse_Body) SetNetmapStatus(v NetmapStatus) {
- if x != nil {
- x.NetmapStatus = v
- }
-}
-
-// SetHealthStatus sets health status of the storage node application.
-func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
- if x != nil {
- x.HealthStatus = v
- }
-}
-
-// SetBody sets health check response body.
-func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetStatus sets new storage node status in FrostFS network map.
-func (x *SetNetmapStatusRequest_Body) SetStatus(v NetmapStatus) {
- if x != nil {
- x.Status = v
- }
-}
-
-// SetForceMaintenance sets force_maintenance flag in the message.
-func (x *SetNetmapStatusRequest_Body) SetForceMaintenance() {
- x.ForceMaintenance = true
-}
-
-// SetBody sets body of the set netmap status request .
-func (x *SetNetmapStatusRequest) SetBody(v *SetNetmapStatusRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets set body of the netmap status response.
-func (x *SetNetmapStatusResponse) SetBody(v *SetNetmapStatusResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetAddressList sets list of objects to be removed in FrostFS API binary format.
-func (x *DropObjectsRequest_Body) SetAddressList(v [][]byte) {
- if x != nil {
- x.AddressList = v
- }
-}
-
-// SetBody sets body of the set "Drop objects" request.
-func (x *DropObjectsRequest) SetBody(v *DropObjectsRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets set body of the "Drop objects" response.
-func (x *DropObjectsResponse) SetBody(v *DropObjectsResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets list shards request body.
-func (x *ListShardsRequest) SetBody(v *ListShardsRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetShards sets shards of the storage node.
-func (x *ListShardsResponse_Body) SetShards(v []*ShardInfo) {
- if x != nil {
- x.Shards = v
- }
-}
-
-// SetBody sets list shards response body.
-func (x *ListShardsResponse) SetBody(v *ListShardsResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetShardIDList sets shard ID whose mode is requested to be set.
-func (x *SetShardModeRequest_Body) SetShardIDList(v [][]byte) {
- if v != nil {
- x.Shard_ID = v
- }
-}
-
-// SetMode sets mode of the shard.
-func (x *SetShardModeRequest_Body) SetMode(v ShardMode) {
- x.Mode = v
-}
-
-// ClearErrorCounter sets flag signifying whether error counter for shard should be cleared.
-func (x *SetShardModeRequest_Body) ClearErrorCounter(reset bool) {
- x.ResetErrorCounter = reset
-}
-
-// SetBody sets request body.
-func (x *SetShardModeRequest) SetBody(v *SetShardModeRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets body of the set shard mode response.
-func (x *SetShardModeResponse) SetBody(v *SetShardModeResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets list shards request body.
-func (x *SynchronizeTreeRequest) SetBody(v *SynchronizeTreeRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets list shards response body.
-func (x *SynchronizeTreeResponse) SetBody(v *SynchronizeTreeResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
diff --git a/pkg/services/control/service.pb.go b/pkg/services/control/service.pb.go
deleted file mode 100644
index e5a5ce24c..000000000
--- a/pkg/services/control/service.pb.go
+++ /dev/null
@@ -1,7094 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.0
-// source: pkg/services/control/service.proto
-
-package control
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type StartShardEvacuationRequest_Body_Scope int32
-
-const (
- StartShardEvacuationRequest_Body_NONE StartShardEvacuationRequest_Body_Scope = 0
- StartShardEvacuationRequest_Body_OBJECTS StartShardEvacuationRequest_Body_Scope = 1
- StartShardEvacuationRequest_Body_TREES StartShardEvacuationRequest_Body_Scope = 2
-)
-
-// Enum value maps for StartShardEvacuationRequest_Body_Scope.
-var (
- StartShardEvacuationRequest_Body_Scope_name = map[int32]string{
- 0: "NONE",
- 1: "OBJECTS",
- 2: "TREES",
- }
- StartShardEvacuationRequest_Body_Scope_value = map[string]int32{
- "NONE": 0,
- "OBJECTS": 1,
- "TREES": 2,
- }
-)
-
-func (x StartShardEvacuationRequest_Body_Scope) Enum() *StartShardEvacuationRequest_Body_Scope {
- p := new(StartShardEvacuationRequest_Body_Scope)
- *p = x
- return p
-}
-
-func (x StartShardEvacuationRequest_Body_Scope) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (StartShardEvacuationRequest_Body_Scope) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_control_service_proto_enumTypes[0].Descriptor()
-}
-
-func (StartShardEvacuationRequest_Body_Scope) Type() protoreflect.EnumType {
- return &file_pkg_services_control_service_proto_enumTypes[0]
-}
-
-func (x StartShardEvacuationRequest_Body_Scope) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use StartShardEvacuationRequest_Body_Scope.Descriptor instead.
-func (StartShardEvacuationRequest_Body_Scope) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20, 0, 0}
-}
-
-// Evacuate status enum.
-type GetShardEvacuationStatusResponse_Body_Status int32
-
-const (
- GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED GetShardEvacuationStatusResponse_Body_Status = 0
- GetShardEvacuationStatusResponse_Body_RUNNING GetShardEvacuationStatusResponse_Body_Status = 1
- GetShardEvacuationStatusResponse_Body_COMPLETED GetShardEvacuationStatusResponse_Body_Status = 2
-)
-
-// Enum value maps for GetShardEvacuationStatusResponse_Body_Status.
-var (
- GetShardEvacuationStatusResponse_Body_Status_name = map[int32]string{
- 0: "EVACUATE_SHARD_STATUS_UNDEFINED",
- 1: "RUNNING",
- 2: "COMPLETED",
- }
- GetShardEvacuationStatusResponse_Body_Status_value = map[string]int32{
- "EVACUATE_SHARD_STATUS_UNDEFINED": 0,
- "RUNNING": 1,
- "COMPLETED": 2,
- }
-)
-
-func (x GetShardEvacuationStatusResponse_Body_Status) Enum() *GetShardEvacuationStatusResponse_Body_Status {
- p := new(GetShardEvacuationStatusResponse_Body_Status)
- *p = x
- return p
-}
-
-func (x GetShardEvacuationStatusResponse_Body_Status) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (GetShardEvacuationStatusResponse_Body_Status) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_control_service_proto_enumTypes[1].Descriptor()
-}
-
-func (GetShardEvacuationStatusResponse_Body_Status) Type() protoreflect.EnumType {
- return &file_pkg_services_control_service_proto_enumTypes[1]
-}
-
-func (x GetShardEvacuationStatusResponse_Body_Status) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use GetShardEvacuationStatusResponse_Body_Status.Descriptor instead.
-func (GetShardEvacuationStatusResponse_Body_Status) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23, 0, 0}
-}
-
-// Health check request.
-type HealthCheckRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of health check request message.
- Body *HealthCheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthCheckRequest) Reset() {
- *x = HealthCheckRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckRequest) ProtoMessage() {}
-
-func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
-func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthCheckRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Health check request.
-type HealthCheckResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of health check response message.
- Body *HealthCheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthCheckResponse) Reset() {
- *x = HealthCheckResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckResponse) ProtoMessage() {}
-
-func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
-func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthCheckResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Set netmap status request.
-type SetNetmapStatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of set netmap status request message.
- Body *SetNetmapStatusRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SetNetmapStatusRequest) Reset() {
- *x = SetNetmapStatusRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetNetmapStatusRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetNetmapStatusRequest) ProtoMessage() {}
-
-func (x *SetNetmapStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetNetmapStatusRequest.ProtoReflect.Descriptor instead.
-func (*SetNetmapStatusRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *SetNetmapStatusRequest) GetBody() *SetNetmapStatusRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SetNetmapStatusRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Set netmap status response.
-type SetNetmapStatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of set netmap status response message.
- Body *SetNetmapStatusResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SetNetmapStatusResponse) Reset() {
- *x = SetNetmapStatusResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetNetmapStatusResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetNetmapStatusResponse) ProtoMessage() {}
-
-func (x *SetNetmapStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetNetmapStatusResponse.ProtoReflect.Descriptor instead.
-func (*SetNetmapStatusResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *SetNetmapStatusResponse) GetBody() *SetNetmapStatusResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SetNetmapStatusResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Get netmap status request.
-type GetNetmapStatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of set netmap status request message.
- Body *GetNetmapStatusRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetNetmapStatusRequest) Reset() {
- *x = GetNetmapStatusRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNetmapStatusRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNetmapStatusRequest) ProtoMessage() {}
-
-func (x *GetNetmapStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNetmapStatusRequest.ProtoReflect.Descriptor instead.
-func (*GetNetmapStatusRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *GetNetmapStatusRequest) GetBody() *GetNetmapStatusRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetNetmapStatusRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Get netmap status response.
-type GetNetmapStatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of get netmap status response message.
- Body *GetNetmapStatusResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetNetmapStatusResponse) Reset() {
- *x = GetNetmapStatusResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNetmapStatusResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNetmapStatusResponse) ProtoMessage() {}
-
-func (x *GetNetmapStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNetmapStatusResponse.ProtoReflect.Descriptor instead.
-func (*GetNetmapStatusResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *GetNetmapStatusResponse) GetBody() *GetNetmapStatusResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetNetmapStatusResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Request to drop the objects.
-type DropObjectsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of the request message.
- Body *DropObjectsRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DropObjectsRequest) Reset() {
- *x = DropObjectsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DropObjectsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DropObjectsRequest) ProtoMessage() {}
-
-func (x *DropObjectsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DropObjectsRequest.ProtoReflect.Descriptor instead.
-func (*DropObjectsRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *DropObjectsRequest) GetBody() *DropObjectsRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DropObjectsRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Response to request to drop the objects.
-type DropObjectsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of the response message.
- Body *DropObjectsResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DropObjectsResponse) Reset() {
- *x = DropObjectsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DropObjectsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DropObjectsResponse) ProtoMessage() {}
-
-func (x *DropObjectsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DropObjectsResponse.ProtoReflect.Descriptor instead.
-func (*DropObjectsResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *DropObjectsResponse) GetBody() *DropObjectsResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DropObjectsResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Request to list all shards of the node.
-type ListShardsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of the request message.
- Body *ListShardsRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListShardsRequest) Reset() {
- *x = ListShardsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListShardsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListShardsRequest) ProtoMessage() {}
-
-func (x *ListShardsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListShardsRequest.ProtoReflect.Descriptor instead.
-func (*ListShardsRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *ListShardsRequest) GetBody() *ListShardsRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ListShardsRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// ListShards response.
-type ListShardsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of the response message.
- Body *ListShardsResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListShardsResponse) Reset() {
- *x = ListShardsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListShardsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListShardsResponse) ProtoMessage() {}
-
-func (x *ListShardsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListShardsResponse.ProtoReflect.Descriptor instead.
-func (*ListShardsResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *ListShardsResponse) GetBody() *ListShardsResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ListShardsResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Request to set mode of the shard.
-type SetShardModeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of set shard mode request message.
- Body *SetShardModeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SetShardModeRequest) Reset() {
- *x = SetShardModeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetShardModeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetShardModeRequest) ProtoMessage() {}
-
-func (x *SetShardModeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetShardModeRequest.ProtoReflect.Descriptor instead.
-func (*SetShardModeRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *SetShardModeRequest) GetBody() *SetShardModeRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SetShardModeRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// SetShardMode response.
-type SetShardModeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of set shard mode response message.
- Body *SetShardModeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SetShardModeResponse) Reset() {
- *x = SetShardModeResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetShardModeResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetShardModeResponse) ProtoMessage() {}
-
-func (x *SetShardModeResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetShardModeResponse.ProtoReflect.Descriptor instead.
-func (*SetShardModeResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *SetShardModeResponse) GetBody() *SetShardModeResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SetShardModeResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// SynchronizeTree request.
-type SynchronizeTreeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of restore shard request message.
- Body *SynchronizeTreeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SynchronizeTreeRequest) Reset() {
- *x = SynchronizeTreeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SynchronizeTreeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SynchronizeTreeRequest) ProtoMessage() {}
-
-func (x *SynchronizeTreeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SynchronizeTreeRequest.ProtoReflect.Descriptor instead.
-func (*SynchronizeTreeRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *SynchronizeTreeRequest) GetBody() *SynchronizeTreeRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SynchronizeTreeRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// SynchronizeTree response.
-type SynchronizeTreeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of restore shard response message.
- Body *SynchronizeTreeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SynchronizeTreeResponse) Reset() {
- *x = SynchronizeTreeResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SynchronizeTreeResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SynchronizeTreeResponse) ProtoMessage() {}
-
-func (x *SynchronizeTreeResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SynchronizeTreeResponse.ProtoReflect.Descriptor instead.
-func (*SynchronizeTreeResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *SynchronizeTreeResponse) GetBody() *SynchronizeTreeResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SynchronizeTreeResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// EvacuateShard request.
-type EvacuateShardRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *EvacuateShardRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *EvacuateShardRequest) Reset() {
- *x = EvacuateShardRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *EvacuateShardRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EvacuateShardRequest) ProtoMessage() {}
-
-func (x *EvacuateShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EvacuateShardRequest.ProtoReflect.Descriptor instead.
-func (*EvacuateShardRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *EvacuateShardRequest) GetBody() *EvacuateShardRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *EvacuateShardRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// EvacuateShard response.
-type EvacuateShardResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *EvacuateShardResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *EvacuateShardResponse) Reset() {
- *x = EvacuateShardResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *EvacuateShardResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EvacuateShardResponse) ProtoMessage() {}
-
-func (x *EvacuateShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EvacuateShardResponse.ProtoReflect.Descriptor instead.
-func (*EvacuateShardResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15}
-}
-
-func (x *EvacuateShardResponse) GetBody() *EvacuateShardResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *EvacuateShardResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// FlushCache request.
-type FlushCacheRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *FlushCacheRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *FlushCacheRequest) Reset() {
- *x = FlushCacheRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FlushCacheRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FlushCacheRequest) ProtoMessage() {}
-
-func (x *FlushCacheRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FlushCacheRequest.ProtoReflect.Descriptor instead.
-func (*FlushCacheRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16}
-}
-
-func (x *FlushCacheRequest) GetBody() *FlushCacheRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *FlushCacheRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// FlushCache response.
-type FlushCacheResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *FlushCacheResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *FlushCacheResponse) Reset() {
- *x = FlushCacheResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FlushCacheResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FlushCacheResponse) ProtoMessage() {}
-
-func (x *FlushCacheResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FlushCacheResponse.ProtoReflect.Descriptor instead.
-func (*FlushCacheResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17}
-}
-
-func (x *FlushCacheResponse) GetBody() *FlushCacheResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *FlushCacheResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Doctor request.
-type DoctorRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *DoctorRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DoctorRequest) Reset() {
- *x = DoctorRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DoctorRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DoctorRequest) ProtoMessage() {}
-
-func (x *DoctorRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DoctorRequest.ProtoReflect.Descriptor instead.
-func (*DoctorRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *DoctorRequest) GetBody() *DoctorRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DoctorRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Doctor response.
-type DoctorResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *DoctorResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DoctorResponse) Reset() {
- *x = DoctorResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DoctorResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DoctorResponse) ProtoMessage() {}
-
-func (x *DoctorResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DoctorResponse.ProtoReflect.Descriptor instead.
-func (*DoctorResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{19}
-}
-
-func (x *DoctorResponse) GetBody() *DoctorResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DoctorResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// StartShardEvacuation request.
-type StartShardEvacuationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *StartShardEvacuationRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *StartShardEvacuationRequest) Reset() {
- *x = StartShardEvacuationRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StartShardEvacuationRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StartShardEvacuationRequest) ProtoMessage() {}
-
-func (x *StartShardEvacuationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StartShardEvacuationRequest.ProtoReflect.Descriptor instead.
-func (*StartShardEvacuationRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20}
-}
-
-func (x *StartShardEvacuationRequest) GetBody() *StartShardEvacuationRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *StartShardEvacuationRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// StartShardEvacuation response.
-type StartShardEvacuationResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *StartShardEvacuationResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *StartShardEvacuationResponse) Reset() {
- *x = StartShardEvacuationResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StartShardEvacuationResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StartShardEvacuationResponse) ProtoMessage() {}
-
-func (x *StartShardEvacuationResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StartShardEvacuationResponse.ProtoReflect.Descriptor instead.
-func (*StartShardEvacuationResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21}
-}
-
-func (x *StartShardEvacuationResponse) GetBody() *StartShardEvacuationResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *StartShardEvacuationResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// GetShardEvacuationStatus request.
-type GetShardEvacuationStatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *GetShardEvacuationStatusRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetShardEvacuationStatusRequest) Reset() {
- *x = GetShardEvacuationStatusRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetShardEvacuationStatusRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetShardEvacuationStatusRequest) ProtoMessage() {}
-
-func (x *GetShardEvacuationStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetShardEvacuationStatusRequest.ProtoReflect.Descriptor instead.
-func (*GetShardEvacuationStatusRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{22}
-}
-
-func (x *GetShardEvacuationStatusRequest) GetBody() *GetShardEvacuationStatusRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetShardEvacuationStatusRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// GetShardEvacuationStatus response.
-type GetShardEvacuationStatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *GetShardEvacuationStatusResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetShardEvacuationStatusResponse) Reset() {
- *x = GetShardEvacuationStatusResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetShardEvacuationStatusResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetShardEvacuationStatusResponse) ProtoMessage() {}
-
-func (x *GetShardEvacuationStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetShardEvacuationStatusResponse.ProtoReflect.Descriptor instead.
-func (*GetShardEvacuationStatusResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23}
-}
-
-func (x *GetShardEvacuationStatusResponse) GetBody() *GetShardEvacuationStatusResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetShardEvacuationStatusResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// ResetShardEvacuationStatus request.
-type ResetShardEvacuationStatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *ResetShardEvacuationStatusRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ResetShardEvacuationStatusRequest) Reset() {
- *x = ResetShardEvacuationStatusRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ResetShardEvacuationStatusRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ResetShardEvacuationStatusRequest) ProtoMessage() {}
-
-func (x *ResetShardEvacuationStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ResetShardEvacuationStatusRequest.ProtoReflect.Descriptor instead.
-func (*ResetShardEvacuationStatusRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{24}
-}
-
-func (x *ResetShardEvacuationStatusRequest) GetBody() *ResetShardEvacuationStatusRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ResetShardEvacuationStatusRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// ResetShardEvacuationStatus response.
-type ResetShardEvacuationStatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *ResetShardEvacuationStatusResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ResetShardEvacuationStatusResponse) Reset() {
- *x = ResetShardEvacuationStatusResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ResetShardEvacuationStatusResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ResetShardEvacuationStatusResponse) ProtoMessage() {}
-
-func (x *ResetShardEvacuationStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ResetShardEvacuationStatusResponse.ProtoReflect.Descriptor instead.
-func (*ResetShardEvacuationStatusResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{25}
-}
-
-func (x *ResetShardEvacuationStatusResponse) GetBody() *ResetShardEvacuationStatusResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ResetShardEvacuationStatusResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// StopShardEvacuation request.
-type StopShardEvacuationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *StopShardEvacuationRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *StopShardEvacuationRequest) Reset() {
- *x = StopShardEvacuationRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StopShardEvacuationRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StopShardEvacuationRequest) ProtoMessage() {}
-
-func (x *StopShardEvacuationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StopShardEvacuationRequest.ProtoReflect.Descriptor instead.
-func (*StopShardEvacuationRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{26}
-}
-
-func (x *StopShardEvacuationRequest) GetBody() *StopShardEvacuationRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *StopShardEvacuationRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// StopShardEvacuation response.
-type StopShardEvacuationResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *StopShardEvacuationResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *StopShardEvacuationResponse) Reset() {
- *x = StopShardEvacuationResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StopShardEvacuationResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StopShardEvacuationResponse) ProtoMessage() {}
-
-func (x *StopShardEvacuationResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StopShardEvacuationResponse.ProtoReflect.Descriptor instead.
-func (*StopShardEvacuationResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{27}
-}
-
-func (x *StopShardEvacuationResponse) GetBody() *StopShardEvacuationResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *StopShardEvacuationResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// AddChainLocalOverride request.
-type AddChainLocalOverrideRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *AddChainLocalOverrideRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddChainLocalOverrideRequest) Reset() {
- *x = AddChainLocalOverrideRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddChainLocalOverrideRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddChainLocalOverrideRequest) ProtoMessage() {}
-
-func (x *AddChainLocalOverrideRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddChainLocalOverrideRequest.ProtoReflect.Descriptor instead.
-func (*AddChainLocalOverrideRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{28}
-}
-
-func (x *AddChainLocalOverrideRequest) GetBody() *AddChainLocalOverrideRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *AddChainLocalOverrideRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// AddChainLocalOverride response.
-type AddChainLocalOverrideResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *AddChainLocalOverrideResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddChainLocalOverrideResponse) Reset() {
- *x = AddChainLocalOverrideResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddChainLocalOverrideResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddChainLocalOverrideResponse) ProtoMessage() {}
-
-func (x *AddChainLocalOverrideResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddChainLocalOverrideResponse.ProtoReflect.Descriptor instead.
-func (*AddChainLocalOverrideResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{29}
-}
-
-func (x *AddChainLocalOverrideResponse) GetBody() *AddChainLocalOverrideResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *AddChainLocalOverrideResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// GetChainLocalOverride request.
-type GetChainLocalOverrideRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *GetChainLocalOverrideRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetChainLocalOverrideRequest) Reset() {
- *x = GetChainLocalOverrideRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetChainLocalOverrideRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetChainLocalOverrideRequest) ProtoMessage() {}
-
-func (x *GetChainLocalOverrideRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetChainLocalOverrideRequest.ProtoReflect.Descriptor instead.
-func (*GetChainLocalOverrideRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{30}
-}
-
-func (x *GetChainLocalOverrideRequest) GetBody() *GetChainLocalOverrideRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetChainLocalOverrideRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// GetChainLocalOverride response.
-type GetChainLocalOverrideResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *GetChainLocalOverrideResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetChainLocalOverrideResponse) Reset() {
- *x = GetChainLocalOverrideResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetChainLocalOverrideResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetChainLocalOverrideResponse) ProtoMessage() {}
-
-func (x *GetChainLocalOverrideResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetChainLocalOverrideResponse.ProtoReflect.Descriptor instead.
-func (*GetChainLocalOverrideResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{31}
-}
-
-func (x *GetChainLocalOverrideResponse) GetBody() *GetChainLocalOverrideResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetChainLocalOverrideResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// ListChainLocalOverrides request.
-type ListChainLocalOverridesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *ListChainLocalOverridesRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListChainLocalOverridesRequest) Reset() {
- *x = ListChainLocalOverridesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListChainLocalOverridesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListChainLocalOverridesRequest) ProtoMessage() {}
-
-func (x *ListChainLocalOverridesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListChainLocalOverridesRequest.ProtoReflect.Descriptor instead.
-func (*ListChainLocalOverridesRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{32}
-}
-
-func (x *ListChainLocalOverridesRequest) GetBody() *ListChainLocalOverridesRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ListChainLocalOverridesRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// ListChainLocalOverrides response.
-type ListChainLocalOverridesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *ListChainLocalOverridesResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListChainLocalOverridesResponse) Reset() {
- *x = ListChainLocalOverridesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListChainLocalOverridesResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListChainLocalOverridesResponse) ProtoMessage() {}
-
-func (x *ListChainLocalOverridesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListChainLocalOverridesResponse.ProtoReflect.Descriptor instead.
-func (*ListChainLocalOverridesResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{33}
-}
-
-func (x *ListChainLocalOverridesResponse) GetBody() *ListChainLocalOverridesResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ListChainLocalOverridesResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// ListTargetsLocalOverrides request.
-type ListTargetsLocalOverridesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *ListTargetsLocalOverridesRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListTargetsLocalOverridesRequest) Reset() {
- *x = ListTargetsLocalOverridesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListTargetsLocalOverridesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListTargetsLocalOverridesRequest) ProtoMessage() {}
-
-func (x *ListTargetsLocalOverridesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListTargetsLocalOverridesRequest.ProtoReflect.Descriptor instead.
-func (*ListTargetsLocalOverridesRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{34}
-}
-
-func (x *ListTargetsLocalOverridesRequest) GetBody() *ListTargetsLocalOverridesRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ListTargetsLocalOverridesRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// ListTargetsLocalOverrides response.
-type ListTargetsLocalOverridesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *ListTargetsLocalOverridesResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ListTargetsLocalOverridesResponse) Reset() {
- *x = ListTargetsLocalOverridesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListTargetsLocalOverridesResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListTargetsLocalOverridesResponse) ProtoMessage() {}
-
-func (x *ListTargetsLocalOverridesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListTargetsLocalOverridesResponse.ProtoReflect.Descriptor instead.
-func (*ListTargetsLocalOverridesResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{35}
-}
-
-func (x *ListTargetsLocalOverridesResponse) GetBody() *ListTargetsLocalOverridesResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ListTargetsLocalOverridesResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveChainLocalOverrideRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *RemoveChainLocalOverrideRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveChainLocalOverrideRequest) Reset() {
- *x = RemoveChainLocalOverrideRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveChainLocalOverrideRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverrideRequest) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverrideRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverrideRequest.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverrideRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{36}
-}
-
-func (x *RemoveChainLocalOverrideRequest) GetBody() *RemoveChainLocalOverrideRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveChainLocalOverrideRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveChainLocalOverrideResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *RemoveChainLocalOverrideResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveChainLocalOverrideResponse) Reset() {
- *x = RemoveChainLocalOverrideResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveChainLocalOverrideResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverrideResponse) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverrideResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverrideResponse.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverrideResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{37}
-}
-
-func (x *RemoveChainLocalOverrideResponse) GetBody() *RemoveChainLocalOverrideResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveChainLocalOverrideResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveChainLocalOverridesByTargetRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *RemoveChainLocalOverridesByTargetRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) Reset() {
- *x = RemoveChainLocalOverridesByTargetRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverridesByTargetRequest) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverridesByTargetRequest.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverridesByTargetRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{38}
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) GetBody() *RemoveChainLocalOverridesByTargetRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveChainLocalOverridesByTargetResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *RemoveChainLocalOverridesByTargetResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) Reset() {
- *x = RemoveChainLocalOverridesByTargetResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverridesByTargetResponse) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverridesByTargetResponse.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverridesByTargetResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{39}
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) GetBody() *RemoveChainLocalOverridesByTargetResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type SealWriteCacheRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *SealWriteCacheRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SealWriteCacheRequest) Reset() {
- *x = SealWriteCacheRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[40]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SealWriteCacheRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SealWriteCacheRequest) ProtoMessage() {}
-
-func (x *SealWriteCacheRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[40]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SealWriteCacheRequest.ProtoReflect.Descriptor instead.
-func (*SealWriteCacheRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{40}
-}
-
-func (x *SealWriteCacheRequest) GetBody() *SealWriteCacheRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SealWriteCacheRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type SealWriteCacheResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *SealWriteCacheResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *SealWriteCacheResponse) Reset() {
- *x = SealWriteCacheResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[41]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SealWriteCacheResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SealWriteCacheResponse) ProtoMessage() {}
-
-func (x *SealWriteCacheResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[41]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SealWriteCacheResponse.ProtoReflect.Descriptor instead.
-func (*SealWriteCacheResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{41}
-}
-
-func (x *SealWriteCacheResponse) GetBody() *SealWriteCacheResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *SealWriteCacheResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type DetachShardsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *DetachShardsRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DetachShardsRequest) Reset() {
- *x = DetachShardsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[42]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DetachShardsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DetachShardsRequest) ProtoMessage() {}
-
-func (x *DetachShardsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[42]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DetachShardsRequest.ProtoReflect.Descriptor instead.
-func (*DetachShardsRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{42}
-}
-
-func (x *DetachShardsRequest) GetBody() *DetachShardsRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DetachShardsRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type DetachShardsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Body *DetachShardsResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DetachShardsResponse) Reset() {
- *x = DetachShardsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[43]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DetachShardsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DetachShardsResponse) ProtoMessage() {}
-
-func (x *DetachShardsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[43]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DetachShardsResponse.ProtoReflect.Descriptor instead.
-func (*DetachShardsResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{43}
-}
-
-func (x *DetachShardsResponse) GetBody() *DetachShardsResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DetachShardsResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// Health check request body.
-type HealthCheckRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *HealthCheckRequest_Body) Reset() {
- *x = HealthCheckRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[44]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckRequest_Body) ProtoMessage() {}
-
-func (x *HealthCheckRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[44]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckRequest_Body.ProtoReflect.Descriptor instead.
-func (*HealthCheckRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{0, 0}
-}
-
-// Health check response body
-type HealthCheckResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Status of the storage node in FrostFS network map.
- NetmapStatus NetmapStatus `protobuf:"varint,1,opt,name=netmap_status,json=netmapStatus,proto3,enum=control.NetmapStatus" json:"netmap_status,omitempty"`
- // Health status of storage node application.
- HealthStatus HealthStatus `protobuf:"varint,2,opt,name=health_status,json=healthStatus,proto3,enum=control.HealthStatus" json:"health_status,omitempty"`
-}
-
-func (x *HealthCheckResponse_Body) Reset() {
- *x = HealthCheckResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[45]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthCheckResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthCheckResponse_Body) ProtoMessage() {}
-
-func (x *HealthCheckResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[45]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthCheckResponse_Body.ProtoReflect.Descriptor instead.
-func (*HealthCheckResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{1, 0}
-}
-
-func (x *HealthCheckResponse_Body) GetNetmapStatus() NetmapStatus {
- if x != nil {
- return x.NetmapStatus
- }
- return NetmapStatus_STATUS_UNDEFINED
-}
-
-func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
- if x != nil {
- return x.HealthStatus
- }
- return HealthStatus_HEALTH_STATUS_UNDEFINED
-}
-
-// Set netmap status request body.
-type SetNetmapStatusRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // New storage node status in FrostFS network map.
- // If status is MAINTENANCE, the node checks whether maintenance is
- // allowed in the network settings. In case of prohibition, the request
- // is denied. Otherwise, node switches to local maintenance state. To
- // force local maintenance, use `force_maintenance` flag.
- Status NetmapStatus `protobuf:"varint,1,opt,name=status,proto3,enum=control.NetmapStatus" json:"status,omitempty"`
- // MAINTENANCE status validation skip flag. If set, node starts local
- // maintenance regardless of network settings. The flag MUST NOT be
- // set for any other status.
- ForceMaintenance bool `protobuf:"varint,2,opt,name=force_maintenance,json=forceMaintenance,proto3" json:"force_maintenance,omitempty"`
-}
-
-func (x *SetNetmapStatusRequest_Body) Reset() {
- *x = SetNetmapStatusRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[46]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetNetmapStatusRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetNetmapStatusRequest_Body) ProtoMessage() {}
-
-func (x *SetNetmapStatusRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[46]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetNetmapStatusRequest_Body.ProtoReflect.Descriptor instead.
-func (*SetNetmapStatusRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{2, 0}
-}
-
-func (x *SetNetmapStatusRequest_Body) GetStatus() NetmapStatus {
- if x != nil {
- return x.Status
- }
- return NetmapStatus_STATUS_UNDEFINED
-}
-
-func (x *SetNetmapStatusRequest_Body) GetForceMaintenance() bool {
- if x != nil {
- return x.ForceMaintenance
- }
- return false
-}
-
-// Set netmap status response body
-type SetNetmapStatusResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *SetNetmapStatusResponse_Body) Reset() {
- *x = SetNetmapStatusResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[47]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetNetmapStatusResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetNetmapStatusResponse_Body) ProtoMessage() {}
-
-func (x *SetNetmapStatusResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[47]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetNetmapStatusResponse_Body.ProtoReflect.Descriptor instead.
-func (*SetNetmapStatusResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{3, 0}
-}
-
-type GetNetmapStatusRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetNetmapStatusRequest_Body) Reset() {
- *x = GetNetmapStatusRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[48]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNetmapStatusRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNetmapStatusRequest_Body) ProtoMessage() {}
-
-func (x *GetNetmapStatusRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[48]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNetmapStatusRequest_Body.ProtoReflect.Descriptor instead.
-func (*GetNetmapStatusRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{4, 0}
-}
-
-type GetNetmapStatusResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Storage node status in FrostFS network map.
- Status NetmapStatus `protobuf:"varint,1,opt,name=status,proto3,enum=control.NetmapStatus" json:"status,omitempty"`
- // Network map epoch.
- Epoch uint64 `protobuf:"varint,2,opt,name=epoch,proto3" json:"epoch,omitempty"`
-}
-
-func (x *GetNetmapStatusResponse_Body) Reset() {
- *x = GetNetmapStatusResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[49]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNetmapStatusResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNetmapStatusResponse_Body) ProtoMessage() {}
-
-func (x *GetNetmapStatusResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[49]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNetmapStatusResponse_Body.ProtoReflect.Descriptor instead.
-func (*GetNetmapStatusResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{5, 0}
-}
-
-func (x *GetNetmapStatusResponse_Body) GetStatus() NetmapStatus {
- if x != nil {
- return x.Status
- }
- return NetmapStatus_STATUS_UNDEFINED
-}
-
-func (x *GetNetmapStatusResponse_Body) GetEpoch() uint64 {
- if x != nil {
- return x.Epoch
- }
- return 0
-}
-
-// Request body structure.
-type DropObjectsRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // List of object addresses to be removed.
- // in FrostFS API binary format.
- AddressList [][]byte `protobuf:"bytes,1,rep,name=address_list,json=addressList,proto3" json:"address_list,omitempty"`
-}
-
-func (x *DropObjectsRequest_Body) Reset() {
- *x = DropObjectsRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[50]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DropObjectsRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DropObjectsRequest_Body) ProtoMessage() {}
-
-func (x *DropObjectsRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[50]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DropObjectsRequest_Body.ProtoReflect.Descriptor instead.
-func (*DropObjectsRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{6, 0}
-}
-
-func (x *DropObjectsRequest_Body) GetAddressList() [][]byte {
- if x != nil {
- return x.AddressList
- }
- return nil
-}
-
-// Response body structure.
-type DropObjectsResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *DropObjectsResponse_Body) Reset() {
- *x = DropObjectsResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[51]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DropObjectsResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DropObjectsResponse_Body) ProtoMessage() {}
-
-func (x *DropObjectsResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[51]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DropObjectsResponse_Body.ProtoReflect.Descriptor instead.
-func (*DropObjectsResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{7, 0}
-}
-
-// Request body structure.
-type ListShardsRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ListShardsRequest_Body) Reset() {
- *x = ListShardsRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[52]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListShardsRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListShardsRequest_Body) ProtoMessage() {}
-
-func (x *ListShardsRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[52]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListShardsRequest_Body.ProtoReflect.Descriptor instead.
-func (*ListShardsRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{8, 0}
-}
-
-// Response body structure.
-type ListShardsResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // List of the node's shards.
- Shards []*ShardInfo `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"`
-}
-
-func (x *ListShardsResponse_Body) Reset() {
- *x = ListShardsResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[53]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListShardsResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListShardsResponse_Body) ProtoMessage() {}
-
-func (x *ListShardsResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[53]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListShardsResponse_Body.ProtoReflect.Descriptor instead.
-func (*ListShardsResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{9, 0}
-}
-
-func (x *ListShardsResponse_Body) GetShards() []*ShardInfo {
- if x != nil {
- return x.Shards
- }
- return nil
-}
-
-// Request body structure.
-type SetShardModeRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Mode that requested to be set.
- Mode ShardMode `protobuf:"varint,2,opt,name=mode,proto3,enum=control.ShardMode" json:"mode,omitempty"`
- // Flag signifying whether error counter should be set to 0.
- ResetErrorCounter bool `protobuf:"varint,3,opt,name=resetErrorCounter,proto3" json:"resetErrorCounter,omitempty"`
-}
-
-func (x *SetShardModeRequest_Body) Reset() {
- *x = SetShardModeRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[54]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetShardModeRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetShardModeRequest_Body) ProtoMessage() {}
-
-func (x *SetShardModeRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[54]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetShardModeRequest_Body.ProtoReflect.Descriptor instead.
-func (*SetShardModeRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10, 0}
-}
-
-func (x *SetShardModeRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *SetShardModeRequest_Body) GetMode() ShardMode {
- if x != nil {
- return x.Mode
- }
- return ShardMode_SHARD_MODE_UNDEFINED
-}
-
-func (x *SetShardModeRequest_Body) GetResetErrorCounter() bool {
- if x != nil {
- return x.ResetErrorCounter
- }
- return false
-}
-
-// Response body structure.
-type SetShardModeResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *SetShardModeResponse_Body) Reset() {
- *x = SetShardModeResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[55]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetShardModeResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetShardModeResponse_Body) ProtoMessage() {}
-
-func (x *SetShardModeResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[55]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetShardModeResponse_Body.ProtoReflect.Descriptor instead.
-func (*SetShardModeResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11, 0}
-}
-
-// Request body structure.
-type SynchronizeTreeRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // Starting height for the synchronization. Can be omitted.
- Height uint64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
-}
-
-func (x *SynchronizeTreeRequest_Body) Reset() {
- *x = SynchronizeTreeRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[56]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SynchronizeTreeRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SynchronizeTreeRequest_Body) ProtoMessage() {}
-
-func (x *SynchronizeTreeRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[56]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SynchronizeTreeRequest_Body.ProtoReflect.Descriptor instead.
-func (*SynchronizeTreeRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12, 0}
-}
-
-func (x *SynchronizeTreeRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *SynchronizeTreeRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *SynchronizeTreeRequest_Body) GetHeight() uint64 {
- if x != nil {
- return x.Height
- }
- return 0
-}
-
-// Response body structure.
-type SynchronizeTreeResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *SynchronizeTreeResponse_Body) Reset() {
- *x = SynchronizeTreeResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[57]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SynchronizeTreeResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SynchronizeTreeResponse_Body) ProtoMessage() {}
-
-func (x *SynchronizeTreeResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[57]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SynchronizeTreeResponse_Body.ProtoReflect.Descriptor instead.
-func (*SynchronizeTreeResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13, 0}
-}
-
-// Request body structure.
-type EvacuateShardRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Flag indicating whether object read errors should be ignored.
- IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
-}
-
-func (x *EvacuateShardRequest_Body) Reset() {
- *x = EvacuateShardRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[58]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *EvacuateShardRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EvacuateShardRequest_Body) ProtoMessage() {}
-
-func (x *EvacuateShardRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[58]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EvacuateShardRequest_Body.ProtoReflect.Descriptor instead.
-func (*EvacuateShardRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14, 0}
-}
-
-func (x *EvacuateShardRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *EvacuateShardRequest_Body) GetIgnoreErrors() bool {
- if x != nil {
- return x.IgnoreErrors
- }
- return false
-}
-
-// Response body structure.
-type EvacuateShardResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Count uint32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
-}
-
-func (x *EvacuateShardResponse_Body) Reset() {
- *x = EvacuateShardResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[59]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *EvacuateShardResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EvacuateShardResponse_Body) ProtoMessage() {}
-
-func (x *EvacuateShardResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[59]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EvacuateShardResponse_Body.ProtoReflect.Descriptor instead.
-func (*EvacuateShardResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15, 0}
-}
-
-func (x *EvacuateShardResponse_Body) GetCount() uint32 {
- if x != nil {
- return x.Count
- }
- return 0
-}
-
-// Request body structure.
-type FlushCacheRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // If true, then writecache will be left in read-only mode after flush
- // completed.
- Seal bool `protobuf:"varint,2,opt,name=seal,proto3" json:"seal,omitempty"`
-}
-
-func (x *FlushCacheRequest_Body) Reset() {
- *x = FlushCacheRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[60]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FlushCacheRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FlushCacheRequest_Body) ProtoMessage() {}
-
-func (x *FlushCacheRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[60]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FlushCacheRequest_Body.ProtoReflect.Descriptor instead.
-func (*FlushCacheRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16, 0}
-}
-
-func (x *FlushCacheRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *FlushCacheRequest_Body) GetSeal() bool {
- if x != nil {
- return x.Seal
- }
- return false
-}
-
-// Response body structure.
-type FlushCacheResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *FlushCacheResponse_Body) Reset() {
- *x = FlushCacheResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[61]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FlushCacheResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FlushCacheResponse_Body) ProtoMessage() {}
-
-func (x *FlushCacheResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[61]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FlushCacheResponse_Body.ProtoReflect.Descriptor instead.
-func (*FlushCacheResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17, 0}
-}
-
-// Request body structure.
-type DoctorRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Number of threads to use for the operation.
- Concurrency uint32 `protobuf:"varint,1,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
- // Flag to search engine for duplicate objects and leave only one copy.
- RemoveDuplicates bool `protobuf:"varint,2,opt,name=remove_duplicates,json=removeDuplicates,proto3" json:"remove_duplicates,omitempty"`
-}
-
-func (x *DoctorRequest_Body) Reset() {
- *x = DoctorRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[62]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DoctorRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DoctorRequest_Body) ProtoMessage() {}
-
-func (x *DoctorRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[62]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DoctorRequest_Body.ProtoReflect.Descriptor instead.
-func (*DoctorRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{18, 0}
-}
-
-func (x *DoctorRequest_Body) GetConcurrency() uint32 {
- if x != nil {
- return x.Concurrency
- }
- return 0
-}
-
-func (x *DoctorRequest_Body) GetRemoveDuplicates() bool {
- if x != nil {
- return x.RemoveDuplicates
- }
- return false
-}
-
-// Response body structure.
-type DoctorResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *DoctorResponse_Body) Reset() {
- *x = DoctorResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[63]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DoctorResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DoctorResponse_Body) ProtoMessage() {}
-
-func (x *DoctorResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[63]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DoctorResponse_Body.ProtoReflect.Descriptor instead.
-func (*DoctorResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{19, 0}
-}
-
-// Request body structure.
-type StartShardEvacuationRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // IDs of the shards.
- Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Flag indicating whether object read errors should be ignored.
- IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
- // Evacuation scope.
- Scope uint32 `protobuf:"varint,3,opt,name=scope,proto3" json:"scope,omitempty"`
-}
-
-func (x *StartShardEvacuationRequest_Body) Reset() {
- *x = StartShardEvacuationRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[64]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StartShardEvacuationRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StartShardEvacuationRequest_Body) ProtoMessage() {}
-
-func (x *StartShardEvacuationRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[64]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StartShardEvacuationRequest_Body.ProtoReflect.Descriptor instead.
-func (*StartShardEvacuationRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20, 0}
-}
-
-func (x *StartShardEvacuationRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *StartShardEvacuationRequest_Body) GetIgnoreErrors() bool {
- if x != nil {
- return x.IgnoreErrors
- }
- return false
-}
-
-func (x *StartShardEvacuationRequest_Body) GetScope() uint32 {
- if x != nil {
- return x.Scope
- }
- return 0
-}
-
-// Response body structure.
-type StartShardEvacuationResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *StartShardEvacuationResponse_Body) Reset() {
- *x = StartShardEvacuationResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[65]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StartShardEvacuationResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StartShardEvacuationResponse_Body) ProtoMessage() {}
-
-func (x *StartShardEvacuationResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[65]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StartShardEvacuationResponse_Body.ProtoReflect.Descriptor instead.
-func (*StartShardEvacuationResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21, 0}
-}
-
-// Request body structure.
-type GetShardEvacuationStatusRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetShardEvacuationStatusRequest_Body) Reset() {
- *x = GetShardEvacuationStatusRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[66]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetShardEvacuationStatusRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetShardEvacuationStatusRequest_Body) ProtoMessage() {}
-
-func (x *GetShardEvacuationStatusRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[66]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetShardEvacuationStatusRequest_Body.ProtoReflect.Descriptor instead.
-func (*GetShardEvacuationStatusRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{22, 0}
-}
-
-// Response body structure.
-type GetShardEvacuationStatusResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Total objects to evacuate count. The value is approximate, so evacuated +
- // failed + skipped == total is not guaranteed after completion.
- TotalObjects uint64 `protobuf:"varint,1,opt,name=total_objects,json=totalObjects,proto3" json:"total_objects,omitempty"`
- // Evacuated objects count.
- EvacuatedObjects uint64 `protobuf:"varint,2,opt,name=evacuated_objects,json=evacuatedObjects,proto3" json:"evacuated_objects,omitempty"`
- // Failed objects count.
- FailedObjects uint64 `protobuf:"varint,3,opt,name=failed_objects,json=failedObjects,proto3" json:"failed_objects,omitempty"`
- // Shard IDs.
- Shard_ID [][]byte `protobuf:"bytes,4,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Evacuation process status.
- Status GetShardEvacuationStatusResponse_Body_Status `protobuf:"varint,5,opt,name=status,proto3,enum=control.GetShardEvacuationStatusResponse_Body_Status" json:"status,omitempty"`
- // Evacuation process duration.
- Duration *GetShardEvacuationStatusResponse_Body_Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"`
- // Evacuation process started at timestamp.
- StartedAt *GetShardEvacuationStatusResponse_Body_UnixTimestamp `protobuf:"bytes,7,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"`
- // Error message if evacuation failed.
- ErrorMessage string `protobuf:"bytes,8,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
- // Skipped objects count.
- SkippedObjects uint64 `protobuf:"varint,9,opt,name=skipped_objects,json=skippedObjects,proto3" json:"skipped_objects,omitempty"`
- // Total trees to evacuate count.
- TotalTrees uint64 `protobuf:"varint,10,opt,name=total_trees,json=totalTrees,proto3" json:"total_trees,omitempty"`
- // Evacuated trees count.
- EvacuatedTrees uint64 `protobuf:"varint,11,opt,name=evacuated_trees,json=evacuatedTrees,proto3" json:"evacuated_trees,omitempty"`
- // Failed trees count.
- FailedTrees uint64 `protobuf:"varint,12,opt,name=failed_trees,json=failedTrees,proto3" json:"failed_trees,omitempty"`
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) Reset() {
- *x = GetShardEvacuationStatusResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[67]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetShardEvacuationStatusResponse_Body) ProtoMessage() {}
-
-func (x *GetShardEvacuationStatusResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[67]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetShardEvacuationStatusResponse_Body.ProtoReflect.Descriptor instead.
-func (*GetShardEvacuationStatusResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23, 0}
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetTotalObjects() uint64 {
- if x != nil {
- return x.TotalObjects
- }
- return 0
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedObjects() uint64 {
- if x != nil {
- return x.EvacuatedObjects
- }
- return 0
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetFailedObjects() uint64 {
- if x != nil {
- return x.FailedObjects
- }
- return 0
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetStatus() GetShardEvacuationStatusResponse_Body_Status {
- if x != nil {
- return x.Status
- }
- return GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetDuration() *GetShardEvacuationStatusResponse_Body_Duration {
- if x != nil {
- return x.Duration
- }
- return nil
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetStartedAt() *GetShardEvacuationStatusResponse_Body_UnixTimestamp {
- if x != nil {
- return x.StartedAt
- }
- return nil
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetErrorMessage() string {
- if x != nil {
- return x.ErrorMessage
- }
- return ""
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetSkippedObjects() uint64 {
- if x != nil {
- return x.SkippedObjects
- }
- return 0
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetTotalTrees() uint64 {
- if x != nil {
- return x.TotalTrees
- }
- return 0
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedTrees() uint64 {
- if x != nil {
- return x.EvacuatedTrees
- }
- return 0
-}
-
-func (x *GetShardEvacuationStatusResponse_Body) GetFailedTrees() uint64 {
- if x != nil {
- return x.FailedTrees
- }
- return 0
-}
-
-// Unix timestamp value.
-type GetShardEvacuationStatusResponse_Body_UnixTimestamp struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) Reset() {
- *x = GetShardEvacuationStatusResponse_Body_UnixTimestamp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[68]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetShardEvacuationStatusResponse_Body_UnixTimestamp) ProtoMessage() {}
-
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[68]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetShardEvacuationStatusResponse_Body_UnixTimestamp.ProtoReflect.Descriptor instead.
-func (*GetShardEvacuationStatusResponse_Body_UnixTimestamp) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23, 0, 0}
-}
-
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) GetValue() int64 {
- if x != nil {
- return x.Value
- }
- return 0
-}
-
-// Duration in seconds.
-type GetShardEvacuationStatusResponse_Body_Duration struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
-}
-
-func (x *GetShardEvacuationStatusResponse_Body_Duration) Reset() {
- *x = GetShardEvacuationStatusResponse_Body_Duration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[69]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetShardEvacuationStatusResponse_Body_Duration) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetShardEvacuationStatusResponse_Body_Duration) ProtoMessage() {}
-
-func (x *GetShardEvacuationStatusResponse_Body_Duration) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[69]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetShardEvacuationStatusResponse_Body_Duration.ProtoReflect.Descriptor instead.
-func (*GetShardEvacuationStatusResponse_Body_Duration) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{23, 0, 1}
-}
-
-func (x *GetShardEvacuationStatusResponse_Body_Duration) GetSeconds() int64 {
- if x != nil {
- return x.Seconds
- }
- return 0
-}
-
-type ResetShardEvacuationStatusRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ResetShardEvacuationStatusRequest_Body) Reset() {
- *x = ResetShardEvacuationStatusRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[70]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ResetShardEvacuationStatusRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ResetShardEvacuationStatusRequest_Body) ProtoMessage() {}
-
-func (x *ResetShardEvacuationStatusRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[70]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ResetShardEvacuationStatusRequest_Body.ProtoReflect.Descriptor instead.
-func (*ResetShardEvacuationStatusRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{24, 0}
-}
-
-type ResetShardEvacuationStatusResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ResetShardEvacuationStatusResponse_Body) Reset() {
- *x = ResetShardEvacuationStatusResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[71]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ResetShardEvacuationStatusResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ResetShardEvacuationStatusResponse_Body) ProtoMessage() {}
-
-func (x *ResetShardEvacuationStatusResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[71]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ResetShardEvacuationStatusResponse_Body.ProtoReflect.Descriptor instead.
-func (*ResetShardEvacuationStatusResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{25, 0}
-}
-
-// Request body structure.
-type StopShardEvacuationRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *StopShardEvacuationRequest_Body) Reset() {
- *x = StopShardEvacuationRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[72]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StopShardEvacuationRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StopShardEvacuationRequest_Body) ProtoMessage() {}
-
-func (x *StopShardEvacuationRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[72]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StopShardEvacuationRequest_Body.ProtoReflect.Descriptor instead.
-func (*StopShardEvacuationRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{26, 0}
-}
-
-// Response body structure.
-type StopShardEvacuationResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *StopShardEvacuationResponse_Body) Reset() {
- *x = StopShardEvacuationResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[73]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StopShardEvacuationResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StopShardEvacuationResponse_Body) ProtoMessage() {}
-
-func (x *StopShardEvacuationResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[73]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StopShardEvacuationResponse_Body.ProtoReflect.Descriptor instead.
-func (*StopShardEvacuationResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{27, 0}
-}
-
-type AddChainLocalOverrideRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Target for which the overrides are applied.
- Target *ChainTarget `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
- // Serialized rule chain. If chain ID is left empty
- // in the chain, then it will be generated and returned
- // in the response.
- Chain []byte `protobuf:"bytes,2,opt,name=chain,proto3" json:"chain,omitempty"`
-}
-
-func (x *AddChainLocalOverrideRequest_Body) Reset() {
- *x = AddChainLocalOverrideRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[74]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddChainLocalOverrideRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddChainLocalOverrideRequest_Body) ProtoMessage() {}
-
-func (x *AddChainLocalOverrideRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[74]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddChainLocalOverrideRequest_Body.ProtoReflect.Descriptor instead.
-func (*AddChainLocalOverrideRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{28, 0}
-}
-
-func (x *AddChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
- if x != nil {
- return x.Target
- }
- return nil
-}
-
-func (x *AddChainLocalOverrideRequest_Body) GetChain() []byte {
- if x != nil {
- return x.Chain
- }
- return nil
-}
-
-type AddChainLocalOverrideResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Chain ID assigned for the added rule chain.
- // If chain ID is left empty in the request, then
- // it will be generated.
- ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
-}
-
-func (x *AddChainLocalOverrideResponse_Body) Reset() {
- *x = AddChainLocalOverrideResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[75]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddChainLocalOverrideResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddChainLocalOverrideResponse_Body) ProtoMessage() {}
-
-func (x *AddChainLocalOverrideResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[75]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddChainLocalOverrideResponse_Body.ProtoReflect.Descriptor instead.
-func (*AddChainLocalOverrideResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{29, 0}
-}
-
-func (x *AddChainLocalOverrideResponse_Body) GetChainId() []byte {
- if x != nil {
- return x.ChainId
- }
- return nil
-}
-
-type GetChainLocalOverrideRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Target for which the overrides are applied.
- Target *ChainTarget `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
- // Chain ID assigned for the added rule chain.
- ChainId []byte `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
-}
-
-func (x *GetChainLocalOverrideRequest_Body) Reset() {
- *x = GetChainLocalOverrideRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[76]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetChainLocalOverrideRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetChainLocalOverrideRequest_Body) ProtoMessage() {}
-
-func (x *GetChainLocalOverrideRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[76]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetChainLocalOverrideRequest_Body.ProtoReflect.Descriptor instead.
-func (*GetChainLocalOverrideRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{30, 0}
-}
-
-func (x *GetChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
- if x != nil {
- return x.Target
- }
- return nil
-}
-
-func (x *GetChainLocalOverrideRequest_Body) GetChainId() []byte {
- if x != nil {
- return x.ChainId
- }
- return nil
-}
-
-type GetChainLocalOverrideResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Serialized rule chain.
- Chain []byte `protobuf:"bytes,1,opt,name=chain,proto3" json:"chain,omitempty"`
-}
-
-func (x *GetChainLocalOverrideResponse_Body) Reset() {
- *x = GetChainLocalOverrideResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[77]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetChainLocalOverrideResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetChainLocalOverrideResponse_Body) ProtoMessage() {}
-
-func (x *GetChainLocalOverrideResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[77]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetChainLocalOverrideResponse_Body.ProtoReflect.Descriptor instead.
-func (*GetChainLocalOverrideResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{31, 0}
-}
-
-func (x *GetChainLocalOverrideResponse_Body) GetChain() []byte {
- if x != nil {
- return x.Chain
- }
- return nil
-}
-
-type ListChainLocalOverridesRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Target for which the overrides are applied.
- Target *ChainTarget `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
-}
-
-func (x *ListChainLocalOverridesRequest_Body) Reset() {
- *x = ListChainLocalOverridesRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[78]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListChainLocalOverridesRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListChainLocalOverridesRequest_Body) ProtoMessage() {}
-
-func (x *ListChainLocalOverridesRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[78]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListChainLocalOverridesRequest_Body.ProtoReflect.Descriptor instead.
-func (*ListChainLocalOverridesRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{32, 0}
-}
-
-func (x *ListChainLocalOverridesRequest_Body) GetTarget() *ChainTarget {
- if x != nil {
- return x.Target
- }
- return nil
-}
-
-type ListChainLocalOverridesResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of serialized rule chain.
- Chains [][]byte `protobuf:"bytes,1,rep,name=chains,proto3" json:"chains,omitempty"`
-}
-
-func (x *ListChainLocalOverridesResponse_Body) Reset() {
- *x = ListChainLocalOverridesResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[79]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListChainLocalOverridesResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListChainLocalOverridesResponse_Body) ProtoMessage() {}
-
-func (x *ListChainLocalOverridesResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[79]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListChainLocalOverridesResponse_Body.ProtoReflect.Descriptor instead.
-func (*ListChainLocalOverridesResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{33, 0}
-}
-
-func (x *ListChainLocalOverridesResponse_Body) GetChains() [][]byte {
- if x != nil {
- return x.Chains
- }
- return nil
-}
-
-type ListTargetsLocalOverridesRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Target for which the overrides are applied.
- ChainName string `protobuf:"bytes,1,opt,name=chainName,proto3" json:"chainName,omitempty"`
-}
-
-func (x *ListTargetsLocalOverridesRequest_Body) Reset() {
- *x = ListTargetsLocalOverridesRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[80]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListTargetsLocalOverridesRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListTargetsLocalOverridesRequest_Body) ProtoMessage() {}
-
-func (x *ListTargetsLocalOverridesRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[80]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListTargetsLocalOverridesRequest_Body.ProtoReflect.Descriptor instead.
-func (*ListTargetsLocalOverridesRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{34, 0}
-}
-
-func (x *ListTargetsLocalOverridesRequest_Body) GetChainName() string {
- if x != nil {
- return x.ChainName
- }
- return ""
-}
-
-type ListTargetsLocalOverridesResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of chain targets.
- Targets []*ChainTarget `protobuf:"bytes,1,rep,name=targets,proto3" json:"targets,omitempty"`
-}
-
-func (x *ListTargetsLocalOverridesResponse_Body) Reset() {
- *x = ListTargetsLocalOverridesResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[81]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListTargetsLocalOverridesResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListTargetsLocalOverridesResponse_Body) ProtoMessage() {}
-
-func (x *ListTargetsLocalOverridesResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[81]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListTargetsLocalOverridesResponse_Body.ProtoReflect.Descriptor instead.
-func (*ListTargetsLocalOverridesResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{35, 0}
-}
-
-func (x *ListTargetsLocalOverridesResponse_Body) GetTargets() []*ChainTarget {
- if x != nil {
- return x.Targets
- }
- return nil
-}
-
-type RemoveChainLocalOverrideRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Target for which the overrides are applied.
- Target *ChainTarget `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
- // Chain ID assigned for the added rule chain.
- ChainId []byte `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
-}
-
-func (x *RemoveChainLocalOverrideRequest_Body) Reset() {
- *x = RemoveChainLocalOverrideRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[82]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveChainLocalOverrideRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverrideRequest_Body) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverrideRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[82]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverrideRequest_Body.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverrideRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{36, 0}
-}
-
-func (x *RemoveChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
- if x != nil {
- return x.Target
- }
- return nil
-}
-
-func (x *RemoveChainLocalOverrideRequest_Body) GetChainId() []byte {
- if x != nil {
- return x.ChainId
- }
- return nil
-}
-
-type RemoveChainLocalOverrideResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *RemoveChainLocalOverrideResponse_Body) Reset() {
- *x = RemoveChainLocalOverrideResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[83]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveChainLocalOverrideResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverrideResponse_Body) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverrideResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[83]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverrideResponse_Body.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverrideResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{37, 0}
-}
-
-type RemoveChainLocalOverridesByTargetRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Target for which the overrides are applied.
- Target *ChainTarget `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) Reset() {
- *x = RemoveChainLocalOverridesByTargetRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[84]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverridesByTargetRequest_Body) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[84]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverridesByTargetRequest_Body.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverridesByTargetRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{38, 0}
-}
-
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) GetTarget() *ChainTarget {
- if x != nil {
- return x.Target
- }
- return nil
-}
-
-type RemoveChainLocalOverridesByTargetResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) Reset() {
- *x = RemoveChainLocalOverridesByTargetResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[85]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveChainLocalOverridesByTargetResponse_Body) ProtoMessage() {}
-
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[85]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveChainLocalOverridesByTargetResponse_Body.ProtoReflect.Descriptor instead.
-func (*RemoveChainLocalOverridesByTargetResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{39, 0}
-}
-
-// Request body structure.
-type SealWriteCacheRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Flag indicating whether object read errors should be ignored.
- IgnoreErrors bool `protobuf:"varint,2,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
- // Flag indicating whether writecache will be sealed async.
- Async bool `protobuf:"varint,3,opt,name=async,proto3" json:"async,omitempty"`
- // If true, then writecache will be sealed, but mode will be restored to the current one.
- RestoreMode bool `protobuf:"varint,4,opt,name=restore_mode,json=restoreMode,proto3" json:"restore_mode,omitempty"`
- // If true, then writecache will shrink internal storage.
- Shrink bool `protobuf:"varint,5,opt,name=shrink,proto3" json:"shrink,omitempty"`
-}
-
-func (x *SealWriteCacheRequest_Body) Reset() {
- *x = SealWriteCacheRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[86]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SealWriteCacheRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SealWriteCacheRequest_Body) ProtoMessage() {}
-
-func (x *SealWriteCacheRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[86]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SealWriteCacheRequest_Body.ProtoReflect.Descriptor instead.
-func (*SealWriteCacheRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{40, 0}
-}
-
-func (x *SealWriteCacheRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *SealWriteCacheRequest_Body) GetIgnoreErrors() bool {
- if x != nil {
- return x.IgnoreErrors
- }
- return false
-}
-
-func (x *SealWriteCacheRequest_Body) GetAsync() bool {
- if x != nil {
- return x.Async
- }
- return false
-}
-
-func (x *SealWriteCacheRequest_Body) GetRestoreMode() bool {
- if x != nil {
- return x.RestoreMode
- }
- return false
-}
-
-func (x *SealWriteCacheRequest_Body) GetShrink() bool {
- if x != nil {
- return x.Shrink
- }
- return false
-}
-
-type SealWriteCacheResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Results []*SealWriteCacheResponse_Body_Status `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
-}
-
-func (x *SealWriteCacheResponse_Body) Reset() {
- *x = SealWriteCacheResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[87]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SealWriteCacheResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SealWriteCacheResponse_Body) ProtoMessage() {}
-
-func (x *SealWriteCacheResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[87]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SealWriteCacheResponse_Body.ProtoReflect.Descriptor instead.
-func (*SealWriteCacheResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{41, 0}
-}
-
-func (x *SealWriteCacheResponse_Body) GetResults() []*SealWriteCacheResponse_Body_Status {
- if x != nil {
- return x.Results
- }
- return nil
-}
-
-type SealWriteCacheResponse_Body_Status struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"`
- Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
-}
-
-func (x *SealWriteCacheResponse_Body_Status) Reset() {
- *x = SealWriteCacheResponse_Body_Status{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[88]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SealWriteCacheResponse_Body_Status) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SealWriteCacheResponse_Body_Status) ProtoMessage() {}
-
-func (x *SealWriteCacheResponse_Body_Status) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[88]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SealWriteCacheResponse_Body_Status.ProtoReflect.Descriptor instead.
-func (*SealWriteCacheResponse_Body_Status) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{41, 0, 0}
-}
-
-func (x *SealWriteCacheResponse_Body_Status) GetShard_ID() []byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *SealWriteCacheResponse_Body_Status) GetSuccess() bool {
- if x != nil {
- return x.Success
- }
- return false
-}
-
-func (x *SealWriteCacheResponse_Body_Status) GetError() string {
- if x != nil {
- return x.Error
- }
- return ""
-}
-
-type DetachShardsRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Shard_ID [][]byte `protobuf:"bytes,1,rep,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
-}
-
-func (x *DetachShardsRequest_Body) Reset() {
- *x = DetachShardsRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[89]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DetachShardsRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DetachShardsRequest_Body) ProtoMessage() {}
-
-func (x *DetachShardsRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[89]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DetachShardsRequest_Body.ProtoReflect.Descriptor instead.
-func (*DetachShardsRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{42, 0}
-}
-
-func (x *DetachShardsRequest_Body) GetShard_ID() [][]byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-type DetachShardsResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *DetachShardsResponse_Body) Reset() {
- *x = DetachShardsResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[90]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DetachShardsResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DetachShardsResponse_Body) ProtoMessage() {}
-
-func (x *DetachShardsResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[90]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DetachShardsResponse_Body.ProtoReflect.Descriptor instead.
-func (*DetachShardsResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{43, 0}
-}
-
-var File_pkg_services_control_service_proto protoreflect.FileDescriptor
-
-var file_pkg_services_control_service_proto_rawDesc = []byte{
- 0x0a, 0x22, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a, 0x20, 0x70,
- 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
- 0x84, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xfe, 0x01, 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
- 0x3a, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6e,
- 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3a, 0x0a, 0x0d, 0x68,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x74, 0x4e,
- 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65,
- 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x62,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x6d,
- 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x10, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x4d, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x6e, 0x61, 0x6e,
- 0x63, 0x65, 0x22, 0x8e, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42,
- 0x6f, 0x64, 0x79, 0x22, 0x8c, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61,
- 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
- 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f,
- 0x64, 0x79, 0x22, 0xd3, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x4b, 0x0a, 0x04, 0x42,
- 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4e, 0x65,
- 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x22, 0xa7, 0x01, 0x0a, 0x12, 0x44, 0x72, 0x6f,
- 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x29, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
- 0x21, 0x0a, 0x0c, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69,
- 0x73, 0x74, 0x22, 0x86, 0x01, 0x0a, 0x13, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x82, 0x01, 0x0a, 0x11,
- 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79,
- 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x22, 0xb0, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0x32, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x06, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x73, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x1a, 0x77, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12,
- 0x2c, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x65, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75,
- 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x72, 0x65, 0x73, 0x65,
- 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x88, 0x01,
- 0x0a, 0x14, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30,
- 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xe0, 0x01, 0x0a, 0x16, 0x53, 0x79, 0x6e,
- 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63,
- 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0x5a, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61,
- 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63,
- 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72,
- 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65,
- 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x17,
- 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xc8, 0x01, 0x0a,
- 0x14, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76,
- 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0x46, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72, 0x72,
- 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72,
- 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x63,
- 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
- 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
- 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x1c, 0x0a, 0x04,
- 0x42, 0x6f, 0x64, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xb1, 0x01, 0x0a, 0x11, 0x46,
- 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61,
- 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x35, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
- 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65,
- 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x73, 0x65, 0x61, 0x6c, 0x22, 0x84,
- 0x01, 0x0a, 0x12, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c,
- 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a,
- 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xc9, 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
- 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x55, 0x0a, 0x04, 0x42, 0x6f,
- 0x64, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72,
- 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x64,
- 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x73, 0x22, 0x7c, 0x0a, 0x0e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74,
- 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22,
- 0x98, 0x02, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76,
- 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x3d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30,
- 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e,
- 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f,
- 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x22,
- 0x29, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45,
- 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12,
- 0x09, 0x0a, 0x05, 0x54, 0x52, 0x45, 0x45, 0x53, 0x10, 0x02, 0x22, 0x98, 0x01, 0x0a, 0x1c, 0x53,
- 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76,
- 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a,
- 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x9e, 0x01, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x89, 0x07, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63,
- 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
- 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0xee, 0x05, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f,
- 0x74, 0x61, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12,
- 0x2b, 0x0a, 0x11, 0x65, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x65, 0x76, 0x61, 0x63,
- 0x75, 0x61, 0x74, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e,
- 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18,
- 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x4d,
- 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x53, 0x0a,
- 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e,
- 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x55, 0x6e, 0x69, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12,
- 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73,
- 0x73, 0x61, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x5f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73,
- 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1f, 0x0a,
- 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x72, 0x65, 0x65, 0x73, 0x12, 0x27,
- 0x0a, 0x0f, 0x65, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x72, 0x65, 0x65,
- 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x65, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
- 0x65, 0x64, 0x54, 0x72, 0x65, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x61, 0x69, 0x6c, 0x65,
- 0x64, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66,
- 0x61, 0x69, 0x6c, 0x65, 0x64, 0x54, 0x72, 0x65, 0x65, 0x73, 0x1a, 0x25, 0x0a, 0x0d, 0x55, 0x6e,
- 0x69, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x1a, 0x24, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
- 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
- 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x49, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x12, 0x23, 0x0a, 0x1f, 0x45, 0x56, 0x41, 0x43, 0x55, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x48,
- 0x41, 0x52, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46,
- 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e,
- 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44,
- 0x10, 0x02, 0x22, 0xa2, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xa4, 0x01, 0x0a, 0x22, 0x52, 0x65, 0x73, 0x65,
- 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x94,
- 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63,
- 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45,
- 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a,
- 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x96, 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74,
- 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xdc,
- 0x01, 0x0a, 0x1c, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
- 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x3e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
- 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x4a, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2c, 0x0a, 0x06, 0x74, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52,
- 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x22, 0xb5, 0x01,
- 0x0a, 0x1d, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f,
- 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x3f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68,
- 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68,
- 0x61, 0x69, 0x6e, 0x49, 0x64, 0x22, 0xe1, 0x01, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61,
- 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47,
- 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
- 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79,
- 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x4f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x12, 0x2c, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x19,
- 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x22, 0xb0, 0x01, 0x0a, 0x1d, 0x47, 0x65,
- 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72,
- 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61,
- 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x1c,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x22, 0xca, 0x01, 0x0a,
- 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f,
- 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x40, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69,
- 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x34, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2c, 0x0a, 0x06, 0x74,
- 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x1f, 0x4c, 0x69,
- 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
- 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c,
- 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x1a, 0x1e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x68,
- 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x63, 0x68, 0x61, 0x69,
- 0x6e, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
- 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x24, 0x0a,
- 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4e, 0x61,
- 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4e,
- 0x61, 0x6d, 0x65, 0x22, 0xd2, 0x01, 0x0a, 0x21, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63,
- 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30,
- 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x1a, 0x36, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2e, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52,
- 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x1f, 0x52, 0x65, 0x6d,
- 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
- 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
- 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x4f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2c, 0x0a, 0x06, 0x74, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52,
- 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e,
- 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e,
- 0x49, 0x64, 0x22, 0xa0, 0x01, 0x0a, 0x20, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61,
- 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
- 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a,
- 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x28, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
- 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69,
- 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76,
- 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72,
- 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30,
- 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x1a, 0x34, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2c, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06,
- 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xb2, 0x01, 0x0a, 0x29, 0x52, 0x65, 0x6d, 0x6f, 0x76,
- 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72,
- 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d,
- 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65,
- 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x9c, 0x02, 0x0a, 0x15,
- 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
- 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30,
- 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x1a, 0x97, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e,
- 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x73, 0x79,
- 0x6e, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x12,
- 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4d, 0x6f,
- 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x72, 0x69, 0x6e, 0x6b, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x06, 0x73, 0x68, 0x72, 0x69, 0x6e, 0x6b, 0x22, 0xa9, 0x02, 0x0a, 0x16, 0x53,
- 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65,
- 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
- 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0xa2, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x07, 0x72, 0x65,
- 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43,
- 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64,
- 0x79, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
- 0x73, 0x1a, 0x53, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
- 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x74, 0x61, 0x63,
- 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
- 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x88, 0x01, 0x0a, 0x14, 0x44,
- 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61,
- 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a,
- 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32, 0x8b, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c,
- 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e,
- 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48,
- 0x0a, 0x0b, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73,
- 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x4b, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12,
- 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f,
- 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12,
- 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72,
- 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68,
- 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76,
- 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61,
- 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x63, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45,
- 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x12, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65,
- 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65,
- 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73,
- 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x60, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63,
- 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45,
- 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12,
- 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43,
- 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f, 0x63, 0x74,
- 0x6f, 0x72, 0x12, 0x16, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63,
- 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c,
- 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c,
- 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x41, 0x64,
- 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72,
- 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x47,
- 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
- 0x72, 0x69, 0x64, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47,
- 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72,
- 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f,
- 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x27,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61,
- 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
- 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x6f, 0x0a, 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68,
- 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x63,
- 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61,
- 0x69, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73,
- 0x42, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x6f,
- 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42, 0x79, 0x54, 0x61,
- 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x61, 0x69,
- 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x42,
- 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x72, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f,
- 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x4c, 0x6f, 0x63,
- 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x53, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44,
- 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74,
- 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75,
- 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64,
- 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_pkg_services_control_service_proto_rawDescOnce sync.Once
- file_pkg_services_control_service_proto_rawDescData = file_pkg_services_control_service_proto_rawDesc
-)
-
-func file_pkg_services_control_service_proto_rawDescGZIP() []byte {
- file_pkg_services_control_service_proto_rawDescOnce.Do(func() {
- file_pkg_services_control_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_service_proto_rawDescData)
- })
- return file_pkg_services_control_service_proto_rawDescData
-}
-
-var file_pkg_services_control_service_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_pkg_services_control_service_proto_msgTypes = make([]protoimpl.MessageInfo, 91)
-var file_pkg_services_control_service_proto_goTypes = []interface{}{
- (StartShardEvacuationRequest_Body_Scope)(0), // 0: control.StartShardEvacuationRequest.Body.Scope
- (GetShardEvacuationStatusResponse_Body_Status)(0), // 1: control.GetShardEvacuationStatusResponse.Body.Status
- (*HealthCheckRequest)(nil), // 2: control.HealthCheckRequest
- (*HealthCheckResponse)(nil), // 3: control.HealthCheckResponse
- (*SetNetmapStatusRequest)(nil), // 4: control.SetNetmapStatusRequest
- (*SetNetmapStatusResponse)(nil), // 5: control.SetNetmapStatusResponse
- (*GetNetmapStatusRequest)(nil), // 6: control.GetNetmapStatusRequest
- (*GetNetmapStatusResponse)(nil), // 7: control.GetNetmapStatusResponse
- (*DropObjectsRequest)(nil), // 8: control.DropObjectsRequest
- (*DropObjectsResponse)(nil), // 9: control.DropObjectsResponse
- (*ListShardsRequest)(nil), // 10: control.ListShardsRequest
- (*ListShardsResponse)(nil), // 11: control.ListShardsResponse
- (*SetShardModeRequest)(nil), // 12: control.SetShardModeRequest
- (*SetShardModeResponse)(nil), // 13: control.SetShardModeResponse
- (*SynchronizeTreeRequest)(nil), // 14: control.SynchronizeTreeRequest
- (*SynchronizeTreeResponse)(nil), // 15: control.SynchronizeTreeResponse
- (*EvacuateShardRequest)(nil), // 16: control.EvacuateShardRequest
- (*EvacuateShardResponse)(nil), // 17: control.EvacuateShardResponse
- (*FlushCacheRequest)(nil), // 18: control.FlushCacheRequest
- (*FlushCacheResponse)(nil), // 19: control.FlushCacheResponse
- (*DoctorRequest)(nil), // 20: control.DoctorRequest
- (*DoctorResponse)(nil), // 21: control.DoctorResponse
- (*StartShardEvacuationRequest)(nil), // 22: control.StartShardEvacuationRequest
- (*StartShardEvacuationResponse)(nil), // 23: control.StartShardEvacuationResponse
- (*GetShardEvacuationStatusRequest)(nil), // 24: control.GetShardEvacuationStatusRequest
- (*GetShardEvacuationStatusResponse)(nil), // 25: control.GetShardEvacuationStatusResponse
- (*ResetShardEvacuationStatusRequest)(nil), // 26: control.ResetShardEvacuationStatusRequest
- (*ResetShardEvacuationStatusResponse)(nil), // 27: control.ResetShardEvacuationStatusResponse
- (*StopShardEvacuationRequest)(nil), // 28: control.StopShardEvacuationRequest
- (*StopShardEvacuationResponse)(nil), // 29: control.StopShardEvacuationResponse
- (*AddChainLocalOverrideRequest)(nil), // 30: control.AddChainLocalOverrideRequest
- (*AddChainLocalOverrideResponse)(nil), // 31: control.AddChainLocalOverrideResponse
- (*GetChainLocalOverrideRequest)(nil), // 32: control.GetChainLocalOverrideRequest
- (*GetChainLocalOverrideResponse)(nil), // 33: control.GetChainLocalOverrideResponse
- (*ListChainLocalOverridesRequest)(nil), // 34: control.ListChainLocalOverridesRequest
- (*ListChainLocalOverridesResponse)(nil), // 35: control.ListChainLocalOverridesResponse
- (*ListTargetsLocalOverridesRequest)(nil), // 36: control.ListTargetsLocalOverridesRequest
- (*ListTargetsLocalOverridesResponse)(nil), // 37: control.ListTargetsLocalOverridesResponse
- (*RemoveChainLocalOverrideRequest)(nil), // 38: control.RemoveChainLocalOverrideRequest
- (*RemoveChainLocalOverrideResponse)(nil), // 39: control.RemoveChainLocalOverrideResponse
- (*RemoveChainLocalOverridesByTargetRequest)(nil), // 40: control.RemoveChainLocalOverridesByTargetRequest
- (*RemoveChainLocalOverridesByTargetResponse)(nil), // 41: control.RemoveChainLocalOverridesByTargetResponse
- (*SealWriteCacheRequest)(nil), // 42: control.SealWriteCacheRequest
- (*SealWriteCacheResponse)(nil), // 43: control.SealWriteCacheResponse
- (*DetachShardsRequest)(nil), // 44: control.DetachShardsRequest
- (*DetachShardsResponse)(nil), // 45: control.DetachShardsResponse
- (*HealthCheckRequest_Body)(nil), // 46: control.HealthCheckRequest.Body
- (*HealthCheckResponse_Body)(nil), // 47: control.HealthCheckResponse.Body
- (*SetNetmapStatusRequest_Body)(nil), // 48: control.SetNetmapStatusRequest.Body
- (*SetNetmapStatusResponse_Body)(nil), // 49: control.SetNetmapStatusResponse.Body
- (*GetNetmapStatusRequest_Body)(nil), // 50: control.GetNetmapStatusRequest.Body
- (*GetNetmapStatusResponse_Body)(nil), // 51: control.GetNetmapStatusResponse.Body
- (*DropObjectsRequest_Body)(nil), // 52: control.DropObjectsRequest.Body
- (*DropObjectsResponse_Body)(nil), // 53: control.DropObjectsResponse.Body
- (*ListShardsRequest_Body)(nil), // 54: control.ListShardsRequest.Body
- (*ListShardsResponse_Body)(nil), // 55: control.ListShardsResponse.Body
- (*SetShardModeRequest_Body)(nil), // 56: control.SetShardModeRequest.Body
- (*SetShardModeResponse_Body)(nil), // 57: control.SetShardModeResponse.Body
- (*SynchronizeTreeRequest_Body)(nil), // 58: control.SynchronizeTreeRequest.Body
- (*SynchronizeTreeResponse_Body)(nil), // 59: control.SynchronizeTreeResponse.Body
- (*EvacuateShardRequest_Body)(nil), // 60: control.EvacuateShardRequest.Body
- (*EvacuateShardResponse_Body)(nil), // 61: control.EvacuateShardResponse.Body
- (*FlushCacheRequest_Body)(nil), // 62: control.FlushCacheRequest.Body
- (*FlushCacheResponse_Body)(nil), // 63: control.FlushCacheResponse.Body
- (*DoctorRequest_Body)(nil), // 64: control.DoctorRequest.Body
- (*DoctorResponse_Body)(nil), // 65: control.DoctorResponse.Body
- (*StartShardEvacuationRequest_Body)(nil), // 66: control.StartShardEvacuationRequest.Body
- (*StartShardEvacuationResponse_Body)(nil), // 67: control.StartShardEvacuationResponse.Body
- (*GetShardEvacuationStatusRequest_Body)(nil), // 68: control.GetShardEvacuationStatusRequest.Body
- (*GetShardEvacuationStatusResponse_Body)(nil), // 69: control.GetShardEvacuationStatusResponse.Body
- (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil), // 70: control.GetShardEvacuationStatusResponse.Body.UnixTimestamp
- (*GetShardEvacuationStatusResponse_Body_Duration)(nil), // 71: control.GetShardEvacuationStatusResponse.Body.Duration
- (*ResetShardEvacuationStatusRequest_Body)(nil), // 72: control.ResetShardEvacuationStatusRequest.Body
- (*ResetShardEvacuationStatusResponse_Body)(nil), // 73: control.ResetShardEvacuationStatusResponse.Body
- (*StopShardEvacuationRequest_Body)(nil), // 74: control.StopShardEvacuationRequest.Body
- (*StopShardEvacuationResponse_Body)(nil), // 75: control.StopShardEvacuationResponse.Body
- (*AddChainLocalOverrideRequest_Body)(nil), // 76: control.AddChainLocalOverrideRequest.Body
- (*AddChainLocalOverrideResponse_Body)(nil), // 77: control.AddChainLocalOverrideResponse.Body
- (*GetChainLocalOverrideRequest_Body)(nil), // 78: control.GetChainLocalOverrideRequest.Body
- (*GetChainLocalOverrideResponse_Body)(nil), // 79: control.GetChainLocalOverrideResponse.Body
- (*ListChainLocalOverridesRequest_Body)(nil), // 80: control.ListChainLocalOverridesRequest.Body
- (*ListChainLocalOverridesResponse_Body)(nil), // 81: control.ListChainLocalOverridesResponse.Body
- (*ListTargetsLocalOverridesRequest_Body)(nil), // 82: control.ListTargetsLocalOverridesRequest.Body
- (*ListTargetsLocalOverridesResponse_Body)(nil), // 83: control.ListTargetsLocalOverridesResponse.Body
- (*RemoveChainLocalOverrideRequest_Body)(nil), // 84: control.RemoveChainLocalOverrideRequest.Body
- (*RemoveChainLocalOverrideResponse_Body)(nil), // 85: control.RemoveChainLocalOverrideResponse.Body
- (*RemoveChainLocalOverridesByTargetRequest_Body)(nil), // 86: control.RemoveChainLocalOverridesByTargetRequest.Body
- (*RemoveChainLocalOverridesByTargetResponse_Body)(nil), // 87: control.RemoveChainLocalOverridesByTargetResponse.Body
- (*SealWriteCacheRequest_Body)(nil), // 88: control.SealWriteCacheRequest.Body
- (*SealWriteCacheResponse_Body)(nil), // 89: control.SealWriteCacheResponse.Body
- (*SealWriteCacheResponse_Body_Status)(nil), // 90: control.SealWriteCacheResponse.Body.Status
- (*DetachShardsRequest_Body)(nil), // 91: control.DetachShardsRequest.Body
- (*DetachShardsResponse_Body)(nil), // 92: control.DetachShardsResponse.Body
- (*Signature)(nil), // 93: control.Signature
- (NetmapStatus)(0), // 94: control.NetmapStatus
- (HealthStatus)(0), // 95: control.HealthStatus
- (*ShardInfo)(nil), // 96: control.ShardInfo
- (ShardMode)(0), // 97: control.ShardMode
- (*ChainTarget)(nil), // 98: control.ChainTarget
-}
-var file_pkg_services_control_service_proto_depIdxs = []int32{
- 46, // 0: control.HealthCheckRequest.body:type_name -> control.HealthCheckRequest.Body
- 93, // 1: control.HealthCheckRequest.signature:type_name -> control.Signature
- 47, // 2: control.HealthCheckResponse.body:type_name -> control.HealthCheckResponse.Body
- 93, // 3: control.HealthCheckResponse.signature:type_name -> control.Signature
- 48, // 4: control.SetNetmapStatusRequest.body:type_name -> control.SetNetmapStatusRequest.Body
- 93, // 5: control.SetNetmapStatusRequest.signature:type_name -> control.Signature
- 49, // 6: control.SetNetmapStatusResponse.body:type_name -> control.SetNetmapStatusResponse.Body
- 93, // 7: control.SetNetmapStatusResponse.signature:type_name -> control.Signature
- 50, // 8: control.GetNetmapStatusRequest.body:type_name -> control.GetNetmapStatusRequest.Body
- 93, // 9: control.GetNetmapStatusRequest.signature:type_name -> control.Signature
- 51, // 10: control.GetNetmapStatusResponse.body:type_name -> control.GetNetmapStatusResponse.Body
- 93, // 11: control.GetNetmapStatusResponse.signature:type_name -> control.Signature
- 52, // 12: control.DropObjectsRequest.body:type_name -> control.DropObjectsRequest.Body
- 93, // 13: control.DropObjectsRequest.signature:type_name -> control.Signature
- 53, // 14: control.DropObjectsResponse.body:type_name -> control.DropObjectsResponse.Body
- 93, // 15: control.DropObjectsResponse.signature:type_name -> control.Signature
- 54, // 16: control.ListShardsRequest.body:type_name -> control.ListShardsRequest.Body
- 93, // 17: control.ListShardsRequest.signature:type_name -> control.Signature
- 55, // 18: control.ListShardsResponse.body:type_name -> control.ListShardsResponse.Body
- 93, // 19: control.ListShardsResponse.signature:type_name -> control.Signature
- 56, // 20: control.SetShardModeRequest.body:type_name -> control.SetShardModeRequest.Body
- 93, // 21: control.SetShardModeRequest.signature:type_name -> control.Signature
- 57, // 22: control.SetShardModeResponse.body:type_name -> control.SetShardModeResponse.Body
- 93, // 23: control.SetShardModeResponse.signature:type_name -> control.Signature
- 58, // 24: control.SynchronizeTreeRequest.body:type_name -> control.SynchronizeTreeRequest.Body
- 93, // 25: control.SynchronizeTreeRequest.signature:type_name -> control.Signature
- 59, // 26: control.SynchronizeTreeResponse.body:type_name -> control.SynchronizeTreeResponse.Body
- 93, // 27: control.SynchronizeTreeResponse.signature:type_name -> control.Signature
- 60, // 28: control.EvacuateShardRequest.body:type_name -> control.EvacuateShardRequest.Body
- 93, // 29: control.EvacuateShardRequest.signature:type_name -> control.Signature
- 61, // 30: control.EvacuateShardResponse.body:type_name -> control.EvacuateShardResponse.Body
- 93, // 31: control.EvacuateShardResponse.signature:type_name -> control.Signature
- 62, // 32: control.FlushCacheRequest.body:type_name -> control.FlushCacheRequest.Body
- 93, // 33: control.FlushCacheRequest.signature:type_name -> control.Signature
- 63, // 34: control.FlushCacheResponse.body:type_name -> control.FlushCacheResponse.Body
- 93, // 35: control.FlushCacheResponse.signature:type_name -> control.Signature
- 64, // 36: control.DoctorRequest.body:type_name -> control.DoctorRequest.Body
- 93, // 37: control.DoctorRequest.signature:type_name -> control.Signature
- 65, // 38: control.DoctorResponse.body:type_name -> control.DoctorResponse.Body
- 93, // 39: control.DoctorResponse.signature:type_name -> control.Signature
- 66, // 40: control.StartShardEvacuationRequest.body:type_name -> control.StartShardEvacuationRequest.Body
- 93, // 41: control.StartShardEvacuationRequest.signature:type_name -> control.Signature
- 67, // 42: control.StartShardEvacuationResponse.body:type_name -> control.StartShardEvacuationResponse.Body
- 93, // 43: control.StartShardEvacuationResponse.signature:type_name -> control.Signature
- 68, // 44: control.GetShardEvacuationStatusRequest.body:type_name -> control.GetShardEvacuationStatusRequest.Body
- 93, // 45: control.GetShardEvacuationStatusRequest.signature:type_name -> control.Signature
- 69, // 46: control.GetShardEvacuationStatusResponse.body:type_name -> control.GetShardEvacuationStatusResponse.Body
- 93, // 47: control.GetShardEvacuationStatusResponse.signature:type_name -> control.Signature
- 72, // 48: control.ResetShardEvacuationStatusRequest.body:type_name -> control.ResetShardEvacuationStatusRequest.Body
- 93, // 49: control.ResetShardEvacuationStatusRequest.signature:type_name -> control.Signature
- 73, // 50: control.ResetShardEvacuationStatusResponse.body:type_name -> control.ResetShardEvacuationStatusResponse.Body
- 93, // 51: control.ResetShardEvacuationStatusResponse.signature:type_name -> control.Signature
- 74, // 52: control.StopShardEvacuationRequest.body:type_name -> control.StopShardEvacuationRequest.Body
- 93, // 53: control.StopShardEvacuationRequest.signature:type_name -> control.Signature
- 75, // 54: control.StopShardEvacuationResponse.body:type_name -> control.StopShardEvacuationResponse.Body
- 93, // 55: control.StopShardEvacuationResponse.signature:type_name -> control.Signature
- 76, // 56: control.AddChainLocalOverrideRequest.body:type_name -> control.AddChainLocalOverrideRequest.Body
- 93, // 57: control.AddChainLocalOverrideRequest.signature:type_name -> control.Signature
- 77, // 58: control.AddChainLocalOverrideResponse.body:type_name -> control.AddChainLocalOverrideResponse.Body
- 93, // 59: control.AddChainLocalOverrideResponse.signature:type_name -> control.Signature
- 78, // 60: control.GetChainLocalOverrideRequest.body:type_name -> control.GetChainLocalOverrideRequest.Body
- 93, // 61: control.GetChainLocalOverrideRequest.signature:type_name -> control.Signature
- 79, // 62: control.GetChainLocalOverrideResponse.body:type_name -> control.GetChainLocalOverrideResponse.Body
- 93, // 63: control.GetChainLocalOverrideResponse.signature:type_name -> control.Signature
- 80, // 64: control.ListChainLocalOverridesRequest.body:type_name -> control.ListChainLocalOverridesRequest.Body
- 93, // 65: control.ListChainLocalOverridesRequest.signature:type_name -> control.Signature
- 81, // 66: control.ListChainLocalOverridesResponse.body:type_name -> control.ListChainLocalOverridesResponse.Body
- 93, // 67: control.ListChainLocalOverridesResponse.signature:type_name -> control.Signature
- 82, // 68: control.ListTargetsLocalOverridesRequest.body:type_name -> control.ListTargetsLocalOverridesRequest.Body
- 93, // 69: control.ListTargetsLocalOverridesRequest.signature:type_name -> control.Signature
- 83, // 70: control.ListTargetsLocalOverridesResponse.body:type_name -> control.ListTargetsLocalOverridesResponse.Body
- 93, // 71: control.ListTargetsLocalOverridesResponse.signature:type_name -> control.Signature
- 84, // 72: control.RemoveChainLocalOverrideRequest.body:type_name -> control.RemoveChainLocalOverrideRequest.Body
- 93, // 73: control.RemoveChainLocalOverrideRequest.signature:type_name -> control.Signature
- 85, // 74: control.RemoveChainLocalOverrideResponse.body:type_name -> control.RemoveChainLocalOverrideResponse.Body
- 93, // 75: control.RemoveChainLocalOverrideResponse.signature:type_name -> control.Signature
- 86, // 76: control.RemoveChainLocalOverridesByTargetRequest.body:type_name -> control.RemoveChainLocalOverridesByTargetRequest.Body
- 93, // 77: control.RemoveChainLocalOverridesByTargetRequest.signature:type_name -> control.Signature
- 87, // 78: control.RemoveChainLocalOverridesByTargetResponse.body:type_name -> control.RemoveChainLocalOverridesByTargetResponse.Body
- 93, // 79: control.RemoveChainLocalOverridesByTargetResponse.signature:type_name -> control.Signature
- 88, // 80: control.SealWriteCacheRequest.body:type_name -> control.SealWriteCacheRequest.Body
- 93, // 81: control.SealWriteCacheRequest.signature:type_name -> control.Signature
- 89, // 82: control.SealWriteCacheResponse.body:type_name -> control.SealWriteCacheResponse.Body
- 93, // 83: control.SealWriteCacheResponse.signature:type_name -> control.Signature
- 91, // 84: control.DetachShardsRequest.body:type_name -> control.DetachShardsRequest.Body
- 93, // 85: control.DetachShardsRequest.signature:type_name -> control.Signature
- 92, // 86: control.DetachShardsResponse.body:type_name -> control.DetachShardsResponse.Body
- 93, // 87: control.DetachShardsResponse.signature:type_name -> control.Signature
- 94, // 88: control.HealthCheckResponse.Body.netmap_status:type_name -> control.NetmapStatus
- 95, // 89: control.HealthCheckResponse.Body.health_status:type_name -> control.HealthStatus
- 94, // 90: control.SetNetmapStatusRequest.Body.status:type_name -> control.NetmapStatus
- 94, // 91: control.GetNetmapStatusResponse.Body.status:type_name -> control.NetmapStatus
- 96, // 92: control.ListShardsResponse.Body.shards:type_name -> control.ShardInfo
- 97, // 93: control.SetShardModeRequest.Body.mode:type_name -> control.ShardMode
- 1, // 94: control.GetShardEvacuationStatusResponse.Body.status:type_name -> control.GetShardEvacuationStatusResponse.Body.Status
- 71, // 95: control.GetShardEvacuationStatusResponse.Body.duration:type_name -> control.GetShardEvacuationStatusResponse.Body.Duration
- 70, // 96: control.GetShardEvacuationStatusResponse.Body.started_at:type_name -> control.GetShardEvacuationStatusResponse.Body.UnixTimestamp
- 98, // 97: control.AddChainLocalOverrideRequest.Body.target:type_name -> control.ChainTarget
- 98, // 98: control.GetChainLocalOverrideRequest.Body.target:type_name -> control.ChainTarget
- 98, // 99: control.ListChainLocalOverridesRequest.Body.target:type_name -> control.ChainTarget
- 98, // 100: control.ListTargetsLocalOverridesResponse.Body.targets:type_name -> control.ChainTarget
- 98, // 101: control.RemoveChainLocalOverrideRequest.Body.target:type_name -> control.ChainTarget
- 98, // 102: control.RemoveChainLocalOverridesByTargetRequest.Body.target:type_name -> control.ChainTarget
- 90, // 103: control.SealWriteCacheResponse.Body.results:type_name -> control.SealWriteCacheResponse.Body.Status
- 2, // 104: control.ControlService.HealthCheck:input_type -> control.HealthCheckRequest
- 4, // 105: control.ControlService.SetNetmapStatus:input_type -> control.SetNetmapStatusRequest
- 6, // 106: control.ControlService.GetNetmapStatus:input_type -> control.GetNetmapStatusRequest
- 8, // 107: control.ControlService.DropObjects:input_type -> control.DropObjectsRequest
- 10, // 108: control.ControlService.ListShards:input_type -> control.ListShardsRequest
- 12, // 109: control.ControlService.SetShardMode:input_type -> control.SetShardModeRequest
- 14, // 110: control.ControlService.SynchronizeTree:input_type -> control.SynchronizeTreeRequest
- 16, // 111: control.ControlService.EvacuateShard:input_type -> control.EvacuateShardRequest
- 22, // 112: control.ControlService.StartShardEvacuation:input_type -> control.StartShardEvacuationRequest
- 24, // 113: control.ControlService.GetShardEvacuationStatus:input_type -> control.GetShardEvacuationStatusRequest
- 26, // 114: control.ControlService.ResetShardEvacuationStatus:input_type -> control.ResetShardEvacuationStatusRequest
- 28, // 115: control.ControlService.StopShardEvacuation:input_type -> control.StopShardEvacuationRequest
- 18, // 116: control.ControlService.FlushCache:input_type -> control.FlushCacheRequest
- 20, // 117: control.ControlService.Doctor:input_type -> control.DoctorRequest
- 30, // 118: control.ControlService.AddChainLocalOverride:input_type -> control.AddChainLocalOverrideRequest
- 32, // 119: control.ControlService.GetChainLocalOverride:input_type -> control.GetChainLocalOverrideRequest
- 34, // 120: control.ControlService.ListChainLocalOverrides:input_type -> control.ListChainLocalOverridesRequest
- 38, // 121: control.ControlService.RemoveChainLocalOverride:input_type -> control.RemoveChainLocalOverrideRequest
- 40, // 122: control.ControlService.RemoveChainLocalOverridesByTarget:input_type -> control.RemoveChainLocalOverridesByTargetRequest
- 36, // 123: control.ControlService.ListTargetsLocalOverrides:input_type -> control.ListTargetsLocalOverridesRequest
- 42, // 124: control.ControlService.SealWriteCache:input_type -> control.SealWriteCacheRequest
- 44, // 125: control.ControlService.DetachShards:input_type -> control.DetachShardsRequest
- 3, // 126: control.ControlService.HealthCheck:output_type -> control.HealthCheckResponse
- 5, // 127: control.ControlService.SetNetmapStatus:output_type -> control.SetNetmapStatusResponse
- 7, // 128: control.ControlService.GetNetmapStatus:output_type -> control.GetNetmapStatusResponse
- 9, // 129: control.ControlService.DropObjects:output_type -> control.DropObjectsResponse
- 11, // 130: control.ControlService.ListShards:output_type -> control.ListShardsResponse
- 13, // 131: control.ControlService.SetShardMode:output_type -> control.SetShardModeResponse
- 15, // 132: control.ControlService.SynchronizeTree:output_type -> control.SynchronizeTreeResponse
- 17, // 133: control.ControlService.EvacuateShard:output_type -> control.EvacuateShardResponse
- 23, // 134: control.ControlService.StartShardEvacuation:output_type -> control.StartShardEvacuationResponse
- 25, // 135: control.ControlService.GetShardEvacuationStatus:output_type -> control.GetShardEvacuationStatusResponse
- 27, // 136: control.ControlService.ResetShardEvacuationStatus:output_type -> control.ResetShardEvacuationStatusResponse
- 29, // 137: control.ControlService.StopShardEvacuation:output_type -> control.StopShardEvacuationResponse
- 19, // 138: control.ControlService.FlushCache:output_type -> control.FlushCacheResponse
- 21, // 139: control.ControlService.Doctor:output_type -> control.DoctorResponse
- 31, // 140: control.ControlService.AddChainLocalOverride:output_type -> control.AddChainLocalOverrideResponse
- 33, // 141: control.ControlService.GetChainLocalOverride:output_type -> control.GetChainLocalOverrideResponse
- 35, // 142: control.ControlService.ListChainLocalOverrides:output_type -> control.ListChainLocalOverridesResponse
- 39, // 143: control.ControlService.RemoveChainLocalOverride:output_type -> control.RemoveChainLocalOverrideResponse
- 41, // 144: control.ControlService.RemoveChainLocalOverridesByTarget:output_type -> control.RemoveChainLocalOverridesByTargetResponse
- 37, // 145: control.ControlService.ListTargetsLocalOverrides:output_type -> control.ListTargetsLocalOverridesResponse
- 43, // 146: control.ControlService.SealWriteCache:output_type -> control.SealWriteCacheResponse
- 45, // 147: control.ControlService.DetachShards:output_type -> control.DetachShardsResponse
- 126, // [126:148] is the sub-list for method output_type
- 104, // [104:126] is the sub-list for method input_type
- 104, // [104:104] is the sub-list for extension type_name
- 104, // [104:104] is the sub-list for extension extendee
- 0, // [0:104] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_control_service_proto_init() }
-func file_pkg_services_control_service_proto_init() {
- if File_pkg_services_control_service_proto != nil {
- return
- }
- file_pkg_services_control_types_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_control_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetNetmapStatusRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetNetmapStatusResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNetmapStatusRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNetmapStatusResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DropObjectsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DropObjectsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListShardsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListShardsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardModeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardModeResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SynchronizeTreeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SynchronizeTreeResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EvacuateShardRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EvacuateShardResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FlushCacheRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FlushCacheResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DoctorRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DoctorResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartShardEvacuationRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartShardEvacuationResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardEvacuationStatusRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardEvacuationStatusResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ResetShardEvacuationStatusRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ResetShardEvacuationStatusResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StopShardEvacuationRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StopShardEvacuationResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddChainLocalOverrideRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddChainLocalOverrideResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetChainLocalOverrideRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetChainLocalOverrideResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListChainLocalOverridesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListChainLocalOverridesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListTargetsLocalOverridesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListTargetsLocalOverridesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveChainLocalOverrideRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveChainLocalOverrideResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveChainLocalOverridesByTargetRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveChainLocalOverridesByTargetResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SealWriteCacheRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SealWriteCacheResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DetachShardsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DetachShardsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetNetmapStatusRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetNetmapStatusResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNetmapStatusRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNetmapStatusResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DropObjectsRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DropObjectsResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListShardsRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListShardsResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardModeRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardModeResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SynchronizeTreeRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SynchronizeTreeResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EvacuateShardRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EvacuateShardResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FlushCacheRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FlushCacheResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DoctorRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DoctorResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartShardEvacuationRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartShardEvacuationResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardEvacuationStatusRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardEvacuationStatusResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardEvacuationStatusResponse_Body_UnixTimestamp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardEvacuationStatusResponse_Body_Duration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ResetShardEvacuationStatusRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ResetShardEvacuationStatusResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StopShardEvacuationRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StopShardEvacuationResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddChainLocalOverrideRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddChainLocalOverrideResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetChainLocalOverrideRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetChainLocalOverrideResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListChainLocalOverridesRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListChainLocalOverridesResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListTargetsLocalOverridesRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListTargetsLocalOverridesResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveChainLocalOverrideRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveChainLocalOverrideResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveChainLocalOverridesByTargetRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveChainLocalOverridesByTargetResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SealWriteCacheRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SealWriteCacheResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SealWriteCacheResponse_Body_Status); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DetachShardsRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DetachShardsResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_control_service_proto_rawDesc,
- NumEnums: 2,
- NumMessages: 91,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_pkg_services_control_service_proto_goTypes,
- DependencyIndexes: file_pkg_services_control_service_proto_depIdxs,
- EnumInfos: file_pkg_services_control_service_proto_enumTypes,
- MessageInfos: file_pkg_services_control_service_proto_msgTypes,
- }.Build()
- File_pkg_services_control_service_proto = out.File
- file_pkg_services_control_service_proto_rawDesc = nil
- file_pkg_services_control_service_proto_goTypes = nil
- file_pkg_services_control_service_proto_depIdxs = nil
-}
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 822244e77..afd1c3c41 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -2,7 +2,27 @@
package control
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+import (
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+ strconv "strconv"
+)
+
+type HealthCheckRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckRequest_Body)(nil)
+ _ json.Marshaler = (*HealthCheckRequest_Body)(nil)
+ _ json.Unmarshaler = (*HealthCheckRequest_Body)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
@@ -14,18 +34,93 @@ func (x *HealthCheckRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckRequest struct {
+ Body *HealthCheckRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckRequest)(nil)
+ _ json.Marshaler = (*HealthCheckRequest)(nil)
+ _ json.Unmarshaler = (*HealthCheckRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -38,27 +133,6 @@ func (x *HealthCheckRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -74,13 +148,165 @@ func (x *HealthCheckRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthCheckRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthCheckRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthCheckRequest) GetBody() *HealthCheckRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthCheckRequest) SetBody(v *HealthCheckRequest_Body) {
+ x.Body = v
+}
+func (x *HealthCheckRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthCheckRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthCheckRequest_Body
+ f = new(HealthCheckRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckResponse_Body struct {
+ NetmapStatus NetmapStatus `json:"netmapStatus"`
+ HealthStatus HealthStatus `json:"healthStatus"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckResponse_Body)(nil)
+ _ json.Marshaler = (*HealthCheckResponse_Body)(nil)
+ _ json.Unmarshaler = (*HealthCheckResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -93,27 +319,186 @@ func (x *HealthCheckResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.EnumMarshal(1, buf[offset:], int32(x.NetmapStatus))
- offset += proto.EnumMarshal(2, buf[offset:], int32(x.HealthStatus))
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.NetmapStatus) != 0 {
+ mm.AppendInt32(1, int32(x.NetmapStatus))
+ }
+ if int32(x.HealthStatus) != 0 {
+ mm.AppendInt32(2, int32(x.HealthStatus))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // NetmapStatus
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NetmapStatus")
+ }
+ x.NetmapStatus = NetmapStatus(data)
+ case 2: // HealthStatus
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "HealthStatus")
+ }
+ x.HealthStatus = HealthStatus(data)
+ }
+ }
+ return nil
+}
+func (x *HealthCheckResponse_Body) GetNetmapStatus() NetmapStatus {
+ if x != nil {
+ return x.NetmapStatus
+ }
+ return 0
+}
+func (x *HealthCheckResponse_Body) SetNetmapStatus(v NetmapStatus) {
+ x.NetmapStatus = v
+}
+func (x *HealthCheckResponse_Body) GetHealthStatus() HealthStatus {
+ if x != nil {
+ return x.HealthStatus
+ }
+ return 0
+}
+func (x *HealthCheckResponse_Body) SetHealthStatus(v HealthStatus) {
+ x.HealthStatus = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"netmapStatus\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.NetmapStatus))
+ }
+ {
+ const prefix string = ",\"healthStatus\":"
+ out.RawString(prefix)
+ out.Int32(int32(x.HealthStatus))
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "netmapStatus":
+ {
+ var f NetmapStatus
+ var parsedValue NetmapStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := NetmapStatus_value[v]; ok {
+ parsedValue = NetmapStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = NetmapStatus(vv)
+ case float64:
+ parsedValue = NetmapStatus(v)
+ }
+ f = parsedValue
+ x.NetmapStatus = f
+ }
+ case "healthStatus":
+ {
+ var f HealthStatus
+ var parsedValue HealthStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := HealthStatus_value[v]; ok {
+ parsedValue = HealthStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = HealthStatus(vv)
+ case float64:
+ parsedValue = HealthStatus(v)
+ }
+ f = parsedValue
+ x.HealthStatus = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthCheckResponse struct {
+ Body *HealthCheckResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthCheckResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthCheckResponse)(nil)
+ _ json.Marshaler = (*HealthCheckResponse)(nil)
+ _ json.Unmarshaler = (*HealthCheckResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -126,27 +511,6 @@ func (x *HealthCheckResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthCheckResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -162,13 +526,165 @@ func (x *HealthCheckResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthCheckResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthCheckResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthCheckResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthCheckResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthCheckResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthCheckResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthCheckResponse) GetBody() *HealthCheckResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) {
+ x.Body = v
+}
+func (x *HealthCheckResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthCheckResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthCheckResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthCheckResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthCheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthCheckResponse_Body
+ f = new(HealthCheckResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SetNetmapStatusRequest_Body struct {
+ Status NetmapStatus `json:"status"`
+ ForceMaintenance bool `json:"forceMaintenance"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetNetmapStatusRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetNetmapStatusRequest_Body)(nil)
+ _ json.Marshaler = (*SetNetmapStatusRequest_Body)(nil)
+ _ json.Unmarshaler = (*SetNetmapStatusRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -181,27 +697,170 @@ func (x *SetNetmapStatusRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetNetmapStatusRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.EnumMarshal(1, buf[offset:], int32(x.Status))
- offset += proto.BoolMarshal(2, buf[offset:], x.ForceMaintenance)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetNetmapStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetNetmapStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.Status) != 0 {
+ mm.AppendInt32(1, int32(x.Status))
+ }
+ if x.ForceMaintenance {
+ mm.AppendBool(2, x.ForceMaintenance)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetNetmapStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Status
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Status")
+ }
+ x.Status = NetmapStatus(data)
+ case 2: // ForceMaintenance
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ForceMaintenance")
+ }
+ x.ForceMaintenance = data
+ }
+ }
+ return nil
+}
+func (x *SetNetmapStatusRequest_Body) GetStatus() NetmapStatus {
+ if x != nil {
+ return x.Status
+ }
+ return 0
+}
+func (x *SetNetmapStatusRequest_Body) SetStatus(v NetmapStatus) {
+ x.Status = v
+}
+func (x *SetNetmapStatusRequest_Body) GetForceMaintenance() bool {
+ if x != nil {
+ return x.ForceMaintenance
+ }
+ return false
+}
+func (x *SetNetmapStatusRequest_Body) SetForceMaintenance(v bool) {
+ x.ForceMaintenance = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetNetmapStatusRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"status\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.Status))
+ }
+ {
+ const prefix string = ",\"forceMaintenance\":"
+ out.RawString(prefix)
+ out.Bool(x.ForceMaintenance)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetNetmapStatusRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetNetmapStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "status":
+ {
+ var f NetmapStatus
+ var parsedValue NetmapStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := NetmapStatus_value[v]; ok {
+ parsedValue = NetmapStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = NetmapStatus(vv)
+ case float64:
+ parsedValue = NetmapStatus(v)
+ }
+ f = parsedValue
+ x.Status = f
+ }
+ case "forceMaintenance":
+ {
+ var f bool
+ f = in.Bool()
+ x.ForceMaintenance = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SetNetmapStatusRequest struct {
+ Body *SetNetmapStatusRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetNetmapStatusRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetNetmapStatusRequest)(nil)
+ _ json.Marshaler = (*SetNetmapStatusRequest)(nil)
+ _ json.Unmarshaler = (*SetNetmapStatusRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -214,27 +873,6 @@ func (x *SetNetmapStatusRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetNetmapStatusRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -250,13 +888,163 @@ func (x *SetNetmapStatusRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SetNetmapStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SetNetmapStatusRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetNetmapStatusRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetNetmapStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SetNetmapStatusRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SetNetmapStatusRequest) GetBody() *SetNetmapStatusRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SetNetmapStatusRequest) SetBody(v *SetNetmapStatusRequest_Body) {
+ x.Body = v
+}
+func (x *SetNetmapStatusRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SetNetmapStatusRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetNetmapStatusRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetNetmapStatusRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetNetmapStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SetNetmapStatusRequest_Body
+ f = new(SetNetmapStatusRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SetNetmapStatusResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetNetmapStatusResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetNetmapStatusResponse_Body)(nil)
+ _ json.Marshaler = (*SetNetmapStatusResponse_Body)(nil)
+ _ json.Unmarshaler = (*SetNetmapStatusResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -267,18 +1055,93 @@ func (x *SetNetmapStatusResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetNetmapStatusResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetNetmapStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetNetmapStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetNetmapStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetNetmapStatusResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetNetmapStatusResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SetNetmapStatusResponse struct {
+ Body *SetNetmapStatusResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetNetmapStatusResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetNetmapStatusResponse)(nil)
+ _ json.Marshaler = (*SetNetmapStatusResponse)(nil)
+ _ json.Unmarshaler = (*SetNetmapStatusResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -291,27 +1154,6 @@ func (x *SetNetmapStatusResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetNetmapStatusResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -327,13 +1169,163 @@ func (x *SetNetmapStatusResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SetNetmapStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SetNetmapStatusResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetNetmapStatusResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetNetmapStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetNetmapStatusResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SetNetmapStatusResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SetNetmapStatusResponse) GetBody() *SetNetmapStatusResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SetNetmapStatusResponse) SetBody(v *SetNetmapStatusResponse_Body) {
+ x.Body = v
+}
+func (x *SetNetmapStatusResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SetNetmapStatusResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetNetmapStatusResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetNetmapStatusResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetNetmapStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SetNetmapStatusResponse_Body
+ f = new(SetNetmapStatusResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNetmapStatusRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNetmapStatusRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNetmapStatusRequest_Body)(nil)
+ _ json.Marshaler = (*GetNetmapStatusRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetNetmapStatusRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -344,18 +1336,93 @@ func (x *GetNetmapStatusRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNetmapStatusRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNetmapStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNetmapStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNetmapStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNetmapStatusRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNetmapStatusRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNetmapStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNetmapStatusRequest struct {
+ Body *GetNetmapStatusRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNetmapStatusRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNetmapStatusRequest)(nil)
+ _ json.Marshaler = (*GetNetmapStatusRequest)(nil)
+ _ json.Unmarshaler = (*GetNetmapStatusRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -368,27 +1435,6 @@ func (x *GetNetmapStatusRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNetmapStatusRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -404,13 +1450,165 @@ func (x *GetNetmapStatusRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetNetmapStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetNetmapStatusRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNetmapStatusRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNetmapStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetNetmapStatusRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetNetmapStatusRequest) GetBody() *GetNetmapStatusRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetNetmapStatusRequest) SetBody(v *GetNetmapStatusRequest_Body) {
+ x.Body = v
+}
+func (x *GetNetmapStatusRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetNetmapStatusRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNetmapStatusRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNetmapStatusRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNetmapStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetNetmapStatusRequest_Body
+ f = new(GetNetmapStatusRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNetmapStatusResponse_Body struct {
+ Status NetmapStatus `json:"status"`
+ Epoch uint64 `json:"epoch"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNetmapStatusResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNetmapStatusResponse_Body)(nil)
+ _ json.Marshaler = (*GetNetmapStatusResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetNetmapStatusResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -423,27 +1621,170 @@ func (x *GetNetmapStatusResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNetmapStatusResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.EnumMarshal(1, buf[offset:], int32(x.Status))
- offset += proto.UInt64Marshal(2, buf[offset:], x.Epoch)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNetmapStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNetmapStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.Status) != 0 {
+ mm.AppendInt32(1, int32(x.Status))
+ }
+ if x.Epoch != 0 {
+ mm.AppendUint64(2, x.Epoch)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNetmapStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Status
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Status")
+ }
+ x.Status = NetmapStatus(data)
+ case 2: // Epoch
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Epoch")
+ }
+ x.Epoch = data
+ }
+ }
+ return nil
+}
+func (x *GetNetmapStatusResponse_Body) GetStatus() NetmapStatus {
+ if x != nil {
+ return x.Status
+ }
+ return 0
+}
+func (x *GetNetmapStatusResponse_Body) SetStatus(v NetmapStatus) {
+ x.Status = v
+}
+func (x *GetNetmapStatusResponse_Body) GetEpoch() uint64 {
+ if x != nil {
+ return x.Epoch
+ }
+ return 0
+}
+func (x *GetNetmapStatusResponse_Body) SetEpoch(v uint64) {
+ x.Epoch = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNetmapStatusResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"status\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.Status))
+ }
+ {
+ const prefix string = ",\"epoch\":"
+ out.RawString(prefix)
+ out.Uint64(x.Epoch)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNetmapStatusResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "status":
+ {
+ var f NetmapStatus
+ var parsedValue NetmapStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := NetmapStatus_value[v]; ok {
+ parsedValue = NetmapStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = NetmapStatus(vv)
+ case float64:
+ parsedValue = NetmapStatus(v)
+ }
+ f = parsedValue
+ x.Status = f
+ }
+ case "epoch":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.Epoch = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNetmapStatusResponse struct {
+ Body *GetNetmapStatusResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNetmapStatusResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNetmapStatusResponse)(nil)
+ _ json.Marshaler = (*GetNetmapStatusResponse)(nil)
+ _ json.Unmarshaler = (*GetNetmapStatusResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -456,27 +1797,6 @@ func (x *GetNetmapStatusResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNetmapStatusResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -492,13 +1812,164 @@ func (x *GetNetmapStatusResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetNetmapStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetNetmapStatusResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNetmapStatusResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNetmapStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNetmapStatusResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetNetmapStatusResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetNetmapStatusResponse) GetBody() *GetNetmapStatusResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetNetmapStatusResponse) SetBody(v *GetNetmapStatusResponse_Body) {
+ x.Body = v
+}
+func (x *GetNetmapStatusResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetNetmapStatusResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNetmapStatusResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNetmapStatusResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNetmapStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetNetmapStatusResponse_Body
+ f = new(GetNetmapStatusResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DropObjectsRequest_Body struct {
+ AddressList [][]byte `json:"addressList"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DropObjectsRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DropObjectsRequest_Body)(nil)
+ _ json.Marshaler = (*DropObjectsRequest_Body)(nil)
+ _ json.Unmarshaler = (*DropObjectsRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -510,26 +1981,139 @@ func (x *DropObjectsRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DropObjectsRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.AddressList)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DropObjectsRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DropObjectsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.AddressList {
+ mm.AppendBytes(1, x.AddressList[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DropObjectsRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DropObjectsRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // AddressList
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "AddressList")
+ }
+ x.AddressList = append(x.AddressList, data)
+ }
+ }
+ return nil
+}
+func (x *DropObjectsRequest_Body) GetAddressList() [][]byte {
+ if x != nil {
+ return x.AddressList
+ }
+ return nil
+}
+func (x *DropObjectsRequest_Body) SetAddressList(v [][]byte) {
+ x.AddressList = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DropObjectsRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DropObjectsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"addressList\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.AddressList {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Base64Bytes(x.AddressList[i])
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DropObjectsRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DropObjectsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "addressList":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Bytes()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.AddressList = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DropObjectsRequest struct {
+ Body *DropObjectsRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DropObjectsRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*DropObjectsRequest)(nil)
+ _ json.Marshaler = (*DropObjectsRequest)(nil)
+ _ json.Unmarshaler = (*DropObjectsRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -542,27 +2126,6 @@ func (x *DropObjectsRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DropObjectsRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -578,13 +2141,163 @@ func (x *DropObjectsRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DropObjectsRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *DropObjectsRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DropObjectsRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DropObjectsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DropObjectsRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DropObjectsRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DropObjectsRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DropObjectsRequest) GetBody() *DropObjectsRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DropObjectsRequest) SetBody(v *DropObjectsRequest_Body) {
+ x.Body = v
+}
+func (x *DropObjectsRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DropObjectsRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DropObjectsRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DropObjectsRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DropObjectsRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DropObjectsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DropObjectsRequest_Body
+ f = new(DropObjectsRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DropObjectsResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DropObjectsResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DropObjectsResponse_Body)(nil)
+ _ json.Marshaler = (*DropObjectsResponse_Body)(nil)
+ _ json.Unmarshaler = (*DropObjectsResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -595,18 +2308,93 @@ func (x *DropObjectsResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DropObjectsResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DropObjectsResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DropObjectsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DropObjectsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DropObjectsResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DropObjectsResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DropObjectsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DropObjectsResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DropObjectsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DropObjectsResponse struct {
+ Body *DropObjectsResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DropObjectsResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*DropObjectsResponse)(nil)
+ _ json.Marshaler = (*DropObjectsResponse)(nil)
+ _ json.Unmarshaler = (*DropObjectsResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -619,27 +2407,6 @@ func (x *DropObjectsResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DropObjectsResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -655,13 +2422,163 @@ func (x *DropObjectsResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DropObjectsResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *DropObjectsResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DropObjectsResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DropObjectsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DropObjectsResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DropObjectsResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DropObjectsResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DropObjectsResponse) GetBody() *DropObjectsResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DropObjectsResponse) SetBody(v *DropObjectsResponse_Body) {
+ x.Body = v
+}
+func (x *DropObjectsResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DropObjectsResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DropObjectsResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DropObjectsResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DropObjectsResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DropObjectsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DropObjectsResponse_Body
+ f = new(DropObjectsResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsRequest_Body)(nil)
+ _ json.Marshaler = (*ListShardsRequest_Body)(nil)
+ _ json.Unmarshaler = (*ListShardsRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -672,18 +2589,93 @@ func (x *ListShardsRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListShardsRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListShardsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsRequest struct {
+ Body *ListShardsRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsRequest)(nil)
+ _ json.Marshaler = (*ListShardsRequest)(nil)
+ _ json.Unmarshaler = (*ListShardsRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -696,27 +2688,6 @@ func (x *ListShardsRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListShardsRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -732,13 +2703,164 @@ func (x *ListShardsRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ListShardsRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ListShardsRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListShardsRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsRequest) GetBody() *ListShardsRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListShardsRequest) SetBody(v *ListShardsRequest_Body) {
+ x.Body = v
+}
+func (x *ListShardsRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListShardsRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListShardsRequest_Body
+ f = new(ListShardsRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsResponse_Body struct {
+ Shards []*ShardInfo `json:"shards"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsResponse_Body)(nil)
+ _ json.Marshaler = (*ListShardsResponse_Body)(nil)
+ _ json.Unmarshaler = (*ListShardsResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -752,28 +2874,146 @@ func (x *ListShardsResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListShardsResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- for i := range x.Shards {
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Shards[i])
- }
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for i := range x.Shards {
+ if x.Shards[i] != nil && x.Shards[i].StableSize() != 0 {
+ x.Shards[i].EmitProtobuf(mm.AppendMessage(1))
+ }
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shards
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shards")
+ }
+ x.Shards = append(x.Shards, new(ShardInfo))
+ ff := x.Shards[len(x.Shards)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsResponse_Body) GetShards() []*ShardInfo {
+ if x != nil {
+ return x.Shards
+ }
+ return nil
+}
+func (x *ListShardsResponse_Body) SetShards(v []*ShardInfo) {
+ x.Shards = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shards\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Shards {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Shards[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shards":
+ {
+ var f *ShardInfo
+ var list []*ShardInfo
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(ShardInfo)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shards = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListShardsResponse struct {
+ Body *ListShardsResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListShardsResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListShardsResponse)(nil)
+ _ json.Marshaler = (*ListShardsResponse)(nil)
+ _ json.Unmarshaler = (*ListShardsResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -786,27 +3026,6 @@ func (x *ListShardsResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListShardsResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -822,13 +3041,166 @@ func (x *ListShardsResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ListShardsResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ListShardsResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListShardsResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListShardsResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListShardsResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListShardsResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListShardsResponse) GetBody() *ListShardsResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListShardsResponse) SetBody(v *ListShardsResponse_Body) {
+ x.Body = v
+}
+func (x *ListShardsResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListShardsResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListShardsResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListShardsResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListShardsResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListShardsResponse_Body
+ f = new(ListShardsResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SetShardModeRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ Mode ShardMode `json:"mode"`
+ ResetErrorCounter bool `json:"resetErrorCounter"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetShardModeRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetShardModeRequest_Body)(nil)
+ _ json.Marshaler = (*SetShardModeRequest_Body)(nil)
+ _ json.Unmarshaler = (*SetShardModeRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -842,28 +3214,213 @@ func (x *SetShardModeRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetShardModeRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.EnumMarshal(2, buf[offset:], int32(x.Mode))
- offset += proto.BoolMarshal(3, buf[offset:], x.ResetErrorCounter)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetShardModeRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetShardModeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if int32(x.Mode) != 0 {
+ mm.AppendInt32(2, int32(x.Mode))
+ }
+ if x.ResetErrorCounter {
+ mm.AppendBool(3, x.ResetErrorCounter)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetShardModeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetShardModeRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // Mode
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Mode")
+ }
+ x.Mode = ShardMode(data)
+ case 3: // ResetErrorCounter
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ResetErrorCounter")
+ }
+ x.ResetErrorCounter = data
+ }
+ }
+ return nil
+}
+func (x *SetShardModeRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *SetShardModeRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *SetShardModeRequest_Body) GetMode() ShardMode {
+ if x != nil {
+ return x.Mode
+ }
+ return 0
+}
+func (x *SetShardModeRequest_Body) SetMode(v ShardMode) {
+ x.Mode = v
+}
+func (x *SetShardModeRequest_Body) GetResetErrorCounter() bool {
+ if x != nil {
+ return x.ResetErrorCounter
+ }
+ return false
+}
+func (x *SetShardModeRequest_Body) SetResetErrorCounter(v bool) {
+ x.ResetErrorCounter = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetShardModeRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetShardModeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Base64Bytes(x.Shard_ID[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"mode\":"
+ out.RawString(prefix)
+ out.Int32(int32(x.Mode))
+ }
+ {
+ const prefix string = ",\"resetErrorCounter\":"
+ out.RawString(prefix)
+ out.Bool(x.ResetErrorCounter)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetShardModeRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetShardModeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Bytes()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "mode":
+ {
+ var f ShardMode
+ var parsedValue ShardMode
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := ShardMode_value[v]; ok {
+ parsedValue = ShardMode(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = ShardMode(vv)
+ case float64:
+ parsedValue = ShardMode(v)
+ }
+ f = parsedValue
+ x.Mode = f
+ }
+ case "resetErrorCounter":
+ {
+ var f bool
+ f = in.Bool()
+ x.ResetErrorCounter = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SetShardModeRequest struct {
+ Body *SetShardModeRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetShardModeRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetShardModeRequest)(nil)
+ _ json.Marshaler = (*SetShardModeRequest)(nil)
+ _ json.Unmarshaler = (*SetShardModeRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -876,27 +3433,6 @@ func (x *SetShardModeRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetShardModeRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -912,13 +3448,163 @@ func (x *SetShardModeRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SetShardModeRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SetShardModeRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetShardModeRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetShardModeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetShardModeRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetShardModeRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SetShardModeRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SetShardModeRequest) GetBody() *SetShardModeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SetShardModeRequest) SetBody(v *SetShardModeRequest_Body) {
+ x.Body = v
+}
+func (x *SetShardModeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SetShardModeRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetShardModeRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetShardModeRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetShardModeRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetShardModeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SetShardModeRequest_Body
+ f = new(SetShardModeRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SetShardModeResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetShardModeResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetShardModeResponse_Body)(nil)
+ _ json.Marshaler = (*SetShardModeResponse_Body)(nil)
+ _ json.Unmarshaler = (*SetShardModeResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -929,18 +3615,93 @@ func (x *SetShardModeResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetShardModeResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetShardModeResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetShardModeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetShardModeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetShardModeResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetShardModeResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetShardModeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetShardModeResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetShardModeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SetShardModeResponse struct {
+ Body *SetShardModeResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SetShardModeResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*SetShardModeResponse)(nil)
+ _ json.Marshaler = (*SetShardModeResponse)(nil)
+ _ json.Unmarshaler = (*SetShardModeResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -953,27 +3714,6 @@ func (x *SetShardModeResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SetShardModeResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -989,13 +3729,166 @@ func (x *SetShardModeResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SetShardModeResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SetShardModeResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SetShardModeResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SetShardModeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SetShardModeResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SetShardModeResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SetShardModeResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SetShardModeResponse) GetBody() *SetShardModeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SetShardModeResponse) SetBody(v *SetShardModeResponse_Body) {
+ x.Body = v
+}
+func (x *SetShardModeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SetShardModeResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SetShardModeResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SetShardModeResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SetShardModeResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SetShardModeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SetShardModeResponse_Body
+ f = new(SetShardModeResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SynchronizeTreeRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ Height uint64 `json:"height"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SynchronizeTreeRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SynchronizeTreeRequest_Body)(nil)
+ _ json.Marshaler = (*SynchronizeTreeRequest_Body)(nil)
+ _ json.Unmarshaler = (*SynchronizeTreeRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1009,28 +3902,183 @@ func (x *SynchronizeTreeRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SynchronizeTreeRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.Height)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SynchronizeTreeRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SynchronizeTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.Height != 0 {
+ mm.AppendUint64(3, x.Height)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SynchronizeTreeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // Height
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Height")
+ }
+ x.Height = data
+ }
+ }
+ return nil
+}
+func (x *SynchronizeTreeRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *SynchronizeTreeRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *SynchronizeTreeRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *SynchronizeTreeRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *SynchronizeTreeRequest_Body) GetHeight() uint64 {
+ if x != nil {
+ return x.Height
+ }
+ return 0
+}
+func (x *SynchronizeTreeRequest_Body) SetHeight(v uint64) {
+ x.Height = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SynchronizeTreeRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SynchronizeTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
+ }
+ {
+ const prefix string = ",\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ const prefix string = ",\"height\":"
+ out.RawString(prefix)
+ out.Uint64(x.Height)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SynchronizeTreeRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SynchronizeTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "height":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.Height = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SynchronizeTreeRequest struct {
+ Body *SynchronizeTreeRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SynchronizeTreeRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*SynchronizeTreeRequest)(nil)
+ _ json.Marshaler = (*SynchronizeTreeRequest)(nil)
+ _ json.Unmarshaler = (*SynchronizeTreeRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1043,27 +4091,6 @@ func (x *SynchronizeTreeRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SynchronizeTreeRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1079,13 +4106,163 @@ func (x *SynchronizeTreeRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SynchronizeTreeRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SynchronizeTreeRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SynchronizeTreeRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SynchronizeTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SynchronizeTreeRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SynchronizeTreeRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SynchronizeTreeRequest) GetBody() *SynchronizeTreeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SynchronizeTreeRequest) SetBody(v *SynchronizeTreeRequest_Body) {
+ x.Body = v
+}
+func (x *SynchronizeTreeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SynchronizeTreeRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SynchronizeTreeRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SynchronizeTreeRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SynchronizeTreeRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SynchronizeTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SynchronizeTreeRequest_Body
+ f = new(SynchronizeTreeRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SynchronizeTreeResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SynchronizeTreeResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SynchronizeTreeResponse_Body)(nil)
+ _ json.Marshaler = (*SynchronizeTreeResponse_Body)(nil)
+ _ json.Unmarshaler = (*SynchronizeTreeResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1096,18 +4273,93 @@ func (x *SynchronizeTreeResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SynchronizeTreeResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SynchronizeTreeResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SynchronizeTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SynchronizeTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SynchronizeTreeResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SynchronizeTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SynchronizeTreeResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SynchronizeTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SynchronizeTreeResponse struct {
+ Body *SynchronizeTreeResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SynchronizeTreeResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*SynchronizeTreeResponse)(nil)
+ _ json.Marshaler = (*SynchronizeTreeResponse)(nil)
+ _ json.Unmarshaler = (*SynchronizeTreeResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1120,27 +4372,6 @@ func (x *SynchronizeTreeResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SynchronizeTreeResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1156,13 +4387,165 @@ func (x *SynchronizeTreeResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SynchronizeTreeResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SynchronizeTreeResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SynchronizeTreeResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SynchronizeTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SynchronizeTreeResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SynchronizeTreeResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SynchronizeTreeResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SynchronizeTreeResponse) GetBody() *SynchronizeTreeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SynchronizeTreeResponse) SetBody(v *SynchronizeTreeResponse_Body) {
+ x.Body = v
+}
+func (x *SynchronizeTreeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SynchronizeTreeResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SynchronizeTreeResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SynchronizeTreeResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SynchronizeTreeResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SynchronizeTreeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SynchronizeTreeResponse_Body
+ f = new(SynchronizeTreeResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type EvacuateShardRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ IgnoreErrors bool `json:"ignoreErrors"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*EvacuateShardRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*EvacuateShardRequest_Body)(nil)
+ _ json.Marshaler = (*EvacuateShardRequest_Body)(nil)
+ _ json.Unmarshaler = (*EvacuateShardRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1175,27 +4558,168 @@ func (x *EvacuateShardRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *EvacuateShardRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *EvacuateShardRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *EvacuateShardRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if x.IgnoreErrors {
+ mm.AppendBool(2, x.IgnoreErrors)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *EvacuateShardRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "EvacuateShardRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // IgnoreErrors
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors")
+ }
+ x.IgnoreErrors = data
+ }
+ }
+ return nil
+}
+func (x *EvacuateShardRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *EvacuateShardRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *EvacuateShardRequest_Body) GetIgnoreErrors() bool {
+ if x != nil {
+ return x.IgnoreErrors
+ }
+ return false
+}
+func (x *EvacuateShardRequest_Body) SetIgnoreErrors(v bool) {
+ x.IgnoreErrors = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *EvacuateShardRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *EvacuateShardRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Base64Bytes(x.Shard_ID[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"ignoreErrors\":"
+ out.RawString(prefix)
+ out.Bool(x.IgnoreErrors)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *EvacuateShardRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *EvacuateShardRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Bytes()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "ignoreErrors":
+ {
+ var f bool
+ f = in.Bool()
+ x.IgnoreErrors = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type EvacuateShardRequest struct {
+ Body *EvacuateShardRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*EvacuateShardRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*EvacuateShardRequest)(nil)
+ _ json.Marshaler = (*EvacuateShardRequest)(nil)
+ _ json.Unmarshaler = (*EvacuateShardRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1208,27 +4732,6 @@ func (x *EvacuateShardRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *EvacuateShardRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1244,13 +4747,164 @@ func (x *EvacuateShardRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *EvacuateShardRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *EvacuateShardRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *EvacuateShardRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *EvacuateShardRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *EvacuateShardRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "EvacuateShardRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(EvacuateShardRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *EvacuateShardRequest) GetBody() *EvacuateShardRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *EvacuateShardRequest) SetBody(v *EvacuateShardRequest_Body) {
+ x.Body = v
+}
+func (x *EvacuateShardRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *EvacuateShardRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *EvacuateShardRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *EvacuateShardRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *EvacuateShardRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *EvacuateShardRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *EvacuateShardRequest_Body
+ f = new(EvacuateShardRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type EvacuateShardResponse_Body struct {
+ Count uint32 `json:"count"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*EvacuateShardResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*EvacuateShardResponse_Body)(nil)
+ _ json.Marshaler = (*EvacuateShardResponse_Body)(nil)
+ _ json.Unmarshaler = (*EvacuateShardResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1262,26 +4916,125 @@ func (x *EvacuateShardResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *EvacuateShardResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt32Marshal(1, buf[offset:], x.Count)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *EvacuateShardResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *EvacuateShardResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Count != 0 {
+ mm.AppendUint32(1, x.Count)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *EvacuateShardResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "EvacuateShardResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Count
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Count")
+ }
+ x.Count = data
+ }
+ }
+ return nil
+}
+func (x *EvacuateShardResponse_Body) GetCount() uint32 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+func (x *EvacuateShardResponse_Body) SetCount(v uint32) {
+ x.Count = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *EvacuateShardResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *EvacuateShardResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"count\":"
+ out.RawString(prefix[1:])
+ out.Uint32(x.Count)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *EvacuateShardResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *EvacuateShardResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "count":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.Count = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type EvacuateShardResponse struct {
+ Body *EvacuateShardResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*EvacuateShardResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*EvacuateShardResponse)(nil)
+ _ json.Marshaler = (*EvacuateShardResponse)(nil)
+ _ json.Unmarshaler = (*EvacuateShardResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1294,27 +5047,6 @@ func (x *EvacuateShardResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *EvacuateShardResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1330,13 +5062,165 @@ func (x *EvacuateShardResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *EvacuateShardResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *EvacuateShardResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *EvacuateShardResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *EvacuateShardResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *EvacuateShardResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "EvacuateShardResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(EvacuateShardResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *EvacuateShardResponse) GetBody() *EvacuateShardResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *EvacuateShardResponse) SetBody(v *EvacuateShardResponse_Body) {
+ x.Body = v
+}
+func (x *EvacuateShardResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *EvacuateShardResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *EvacuateShardResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *EvacuateShardResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *EvacuateShardResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *EvacuateShardResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *EvacuateShardResponse_Body
+ f = new(EvacuateShardResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type FlushCacheRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ Seal bool `json:"seal"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*FlushCacheRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*FlushCacheRequest_Body)(nil)
+ _ json.Marshaler = (*FlushCacheRequest_Body)(nil)
+ _ json.Unmarshaler = (*FlushCacheRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1349,27 +5233,168 @@ func (x *FlushCacheRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *FlushCacheRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.BoolMarshal(2, buf[offset:], x.Seal)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *FlushCacheRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *FlushCacheRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if x.Seal {
+ mm.AppendBool(2, x.Seal)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *FlushCacheRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "FlushCacheRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // Seal
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Seal")
+ }
+ x.Seal = data
+ }
+ }
+ return nil
+}
+func (x *FlushCacheRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *FlushCacheRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *FlushCacheRequest_Body) GetSeal() bool {
+ if x != nil {
+ return x.Seal
+ }
+ return false
+}
+func (x *FlushCacheRequest_Body) SetSeal(v bool) {
+ x.Seal = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *FlushCacheRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *FlushCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Base64Bytes(x.Shard_ID[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"seal\":"
+ out.RawString(prefix)
+ out.Bool(x.Seal)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *FlushCacheRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *FlushCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Bytes()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "seal":
+ {
+ var f bool
+ f = in.Bool()
+ x.Seal = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type FlushCacheRequest struct {
+ Body *FlushCacheRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*FlushCacheRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*FlushCacheRequest)(nil)
+ _ json.Marshaler = (*FlushCacheRequest)(nil)
+ _ json.Unmarshaler = (*FlushCacheRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1382,27 +5407,6 @@ func (x *FlushCacheRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *FlushCacheRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1418,13 +5422,163 @@ func (x *FlushCacheRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *FlushCacheRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *FlushCacheRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *FlushCacheRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *FlushCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *FlushCacheRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "FlushCacheRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(FlushCacheRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *FlushCacheRequest) GetBody() *FlushCacheRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *FlushCacheRequest) SetBody(v *FlushCacheRequest_Body) {
+ x.Body = v
+}
+func (x *FlushCacheRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *FlushCacheRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *FlushCacheRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *FlushCacheRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *FlushCacheRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *FlushCacheRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *FlushCacheRequest_Body
+ f = new(FlushCacheRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type FlushCacheResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*FlushCacheResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*FlushCacheResponse_Body)(nil)
+ _ json.Marshaler = (*FlushCacheResponse_Body)(nil)
+ _ json.Unmarshaler = (*FlushCacheResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1435,18 +5589,93 @@ func (x *FlushCacheResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *FlushCacheResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *FlushCacheResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *FlushCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *FlushCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "FlushCacheResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *FlushCacheResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *FlushCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *FlushCacheResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *FlushCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type FlushCacheResponse struct {
+ Body *FlushCacheResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*FlushCacheResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*FlushCacheResponse)(nil)
+ _ json.Marshaler = (*FlushCacheResponse)(nil)
+ _ json.Unmarshaler = (*FlushCacheResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1459,27 +5688,6 @@ func (x *FlushCacheResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *FlushCacheResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1495,13 +5703,165 @@ func (x *FlushCacheResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *FlushCacheResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *FlushCacheResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *FlushCacheResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *FlushCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *FlushCacheResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "FlushCacheResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(FlushCacheResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *FlushCacheResponse) GetBody() *FlushCacheResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *FlushCacheResponse) SetBody(v *FlushCacheResponse_Body) {
+ x.Body = v
+}
+func (x *FlushCacheResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *FlushCacheResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *FlushCacheResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *FlushCacheResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *FlushCacheResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *FlushCacheResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *FlushCacheResponse_Body
+ f = new(FlushCacheResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DoctorRequest_Body struct {
+ Concurrency uint32 `json:"concurrency"`
+ RemoveDuplicates bool `json:"removeDuplicates"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DoctorRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DoctorRequest_Body)(nil)
+ _ json.Marshaler = (*DoctorRequest_Body)(nil)
+ _ json.Unmarshaler = (*DoctorRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1514,27 +5874,154 @@ func (x *DoctorRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DoctorRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt32Marshal(1, buf[offset:], x.Concurrency)
- offset += proto.BoolMarshal(2, buf[offset:], x.RemoveDuplicates)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DoctorRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DoctorRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Concurrency != 0 {
+ mm.AppendUint32(1, x.Concurrency)
+ }
+ if x.RemoveDuplicates {
+ mm.AppendBool(2, x.RemoveDuplicates)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DoctorRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DoctorRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Concurrency
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Concurrency")
+ }
+ x.Concurrency = data
+ case 2: // RemoveDuplicates
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "RemoveDuplicates")
+ }
+ x.RemoveDuplicates = data
+ }
+ }
+ return nil
+}
+func (x *DoctorRequest_Body) GetConcurrency() uint32 {
+ if x != nil {
+ return x.Concurrency
+ }
+ return 0
+}
+func (x *DoctorRequest_Body) SetConcurrency(v uint32) {
+ x.Concurrency = v
+}
+func (x *DoctorRequest_Body) GetRemoveDuplicates() bool {
+ if x != nil {
+ return x.RemoveDuplicates
+ }
+ return false
+}
+func (x *DoctorRequest_Body) SetRemoveDuplicates(v bool) {
+ x.RemoveDuplicates = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DoctorRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DoctorRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"concurrency\":"
+ out.RawString(prefix[1:])
+ out.Uint32(x.Concurrency)
+ }
+ {
+ const prefix string = ",\"removeDuplicates\":"
+ out.RawString(prefix)
+ out.Bool(x.RemoveDuplicates)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DoctorRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DoctorRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "concurrency":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.Concurrency = f
+ }
+ case "removeDuplicates":
+ {
+ var f bool
+ f = in.Bool()
+ x.RemoveDuplicates = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DoctorRequest struct {
+ Body *DoctorRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DoctorRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*DoctorRequest)(nil)
+ _ json.Marshaler = (*DoctorRequest)(nil)
+ _ json.Unmarshaler = (*DoctorRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1547,27 +6034,6 @@ func (x *DoctorRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DoctorRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1583,13 +6049,163 @@ func (x *DoctorRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DoctorRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *DoctorRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DoctorRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DoctorRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DoctorRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DoctorRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DoctorRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DoctorRequest) GetBody() *DoctorRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DoctorRequest) SetBody(v *DoctorRequest_Body) {
+ x.Body = v
+}
+func (x *DoctorRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DoctorRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DoctorRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DoctorRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DoctorRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DoctorRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DoctorRequest_Body
+ f = new(DoctorRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DoctorResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DoctorResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DoctorResponse_Body)(nil)
+ _ json.Marshaler = (*DoctorResponse_Body)(nil)
+ _ json.Unmarshaler = (*DoctorResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1600,18 +6216,93 @@ func (x *DoctorResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DoctorResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DoctorResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DoctorResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DoctorResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DoctorResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DoctorResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DoctorResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DoctorResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DoctorResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DoctorResponse struct {
+ Body *DoctorResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DoctorResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*DoctorResponse)(nil)
+ _ json.Marshaler = (*DoctorResponse)(nil)
+ _ json.Unmarshaler = (*DoctorResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1624,27 +6315,6 @@ func (x *DoctorResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DoctorResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1660,13 +6330,201 @@ func (x *DoctorResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DoctorResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *DoctorResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DoctorResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DoctorResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DoctorResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DoctorResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DoctorResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DoctorResponse) GetBody() *DoctorResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DoctorResponse) SetBody(v *DoctorResponse_Body) {
+ x.Body = v
+}
+func (x *DoctorResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DoctorResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DoctorResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DoctorResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DoctorResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DoctorResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DoctorResponse_Body
+ f = new(DoctorResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardEvacuationRequest_Body_Scope int32
+
+const (
+ StartShardEvacuationRequest_Body_NONE StartShardEvacuationRequest_Body_Scope = 0
+ StartShardEvacuationRequest_Body_OBJECTS StartShardEvacuationRequest_Body_Scope = 1
+ StartShardEvacuationRequest_Body_TREES StartShardEvacuationRequest_Body_Scope = 2
+)
+
+var (
+ StartShardEvacuationRequest_Body_Scope_name = map[int32]string{
+ 0: "NONE",
+ 1: "OBJECTS",
+ 2: "TREES",
+ }
+ StartShardEvacuationRequest_Body_Scope_value = map[string]int32{
+ "NONE": 0,
+ "OBJECTS": 1,
+ "TREES": 2,
+ }
+)
+
+func (x StartShardEvacuationRequest_Body_Scope) String() string {
+ if v, ok := StartShardEvacuationRequest_Body_Scope_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *StartShardEvacuationRequest_Body_Scope) FromString(s string) bool {
+ if v, ok := StartShardEvacuationRequest_Body_Scope_value[s]; ok {
+ *x = StartShardEvacuationRequest_Body_Scope(v)
+ return true
+ }
+ return false
+}
+
+type StartShardEvacuationRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ IgnoreErrors bool `json:"ignoreErrors"`
+ Scope uint32 `json:"scope"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardEvacuationRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardEvacuationRequest_Body)(nil)
+ _ json.Marshaler = (*StartShardEvacuationRequest_Body)(nil)
+ _ json.Unmarshaler = (*StartShardEvacuationRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1680,28 +6538,197 @@ func (x *StartShardEvacuationRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *StartShardEvacuationRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors)
- offset += proto.UInt32Marshal(3, buf[offset:], x.Scope)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardEvacuationRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if x.IgnoreErrors {
+ mm.AppendBool(2, x.IgnoreErrors)
+ }
+ if x.Scope != 0 {
+ mm.AppendUint32(3, x.Scope)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // IgnoreErrors
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors")
+ }
+ x.IgnoreErrors = data
+ case 3: // Scope
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Scope")
+ }
+ x.Scope = data
+ }
+ }
+ return nil
+}
+func (x *StartShardEvacuationRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *StartShardEvacuationRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *StartShardEvacuationRequest_Body) GetIgnoreErrors() bool {
+ if x != nil {
+ return x.IgnoreErrors
+ }
+ return false
+}
+func (x *StartShardEvacuationRequest_Body) SetIgnoreErrors(v bool) {
+ x.IgnoreErrors = v
+}
+func (x *StartShardEvacuationRequest_Body) GetScope() uint32 {
+ if x != nil {
+ return x.Scope
+ }
+ return 0
+}
+func (x *StartShardEvacuationRequest_Body) SetScope(v uint32) {
+ x.Scope = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Base64Bytes(x.Shard_ID[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"ignoreErrors\":"
+ out.RawString(prefix)
+ out.Bool(x.IgnoreErrors)
+ }
+ {
+ const prefix string = ",\"scope\":"
+ out.RawString(prefix)
+ out.Uint32(x.Scope)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardEvacuationRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Bytes()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "ignoreErrors":
+ {
+ var f bool
+ f = in.Bool()
+ x.IgnoreErrors = f
+ }
+ case "scope":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.Scope = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardEvacuationRequest struct {
+ Body *StartShardEvacuationRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardEvacuationRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardEvacuationRequest)(nil)
+ _ json.Marshaler = (*StartShardEvacuationRequest)(nil)
+ _ json.Unmarshaler = (*StartShardEvacuationRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1714,27 +6741,6 @@ func (x *StartShardEvacuationRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *StartShardEvacuationRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1750,13 +6756,163 @@ func (x *StartShardEvacuationRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *StartShardEvacuationRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *StartShardEvacuationRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardEvacuationRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *StartShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardEvacuationRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StartShardEvacuationRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardEvacuationRequest) GetBody() *StartShardEvacuationRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StartShardEvacuationRequest) SetBody(v *StartShardEvacuationRequest_Body) {
+ x.Body = v
+}
+func (x *StartShardEvacuationRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StartShardEvacuationRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardEvacuationRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardEvacuationRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardEvacuationRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StartShardEvacuationRequest_Body
+ f = new(StartShardEvacuationRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardEvacuationResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardEvacuationResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardEvacuationResponse_Body)(nil)
+ _ json.Marshaler = (*StartShardEvacuationResponse_Body)(nil)
+ _ json.Unmarshaler = (*StartShardEvacuationResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1767,18 +6923,93 @@ func (x *StartShardEvacuationResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *StartShardEvacuationResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardEvacuationResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *StartShardEvacuationResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardEvacuationResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardEvacuationResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardEvacuationResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardEvacuationResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardEvacuationResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardEvacuationResponse struct {
+ Body *StartShardEvacuationResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardEvacuationResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardEvacuationResponse)(nil)
+ _ json.Marshaler = (*StartShardEvacuationResponse)(nil)
+ _ json.Unmarshaler = (*StartShardEvacuationResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1791,27 +7022,6 @@ func (x *StartShardEvacuationResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *StartShardEvacuationResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1827,13 +7037,163 @@ func (x *StartShardEvacuationResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *StartShardEvacuationResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *StartShardEvacuationResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardEvacuationResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *StartShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardEvacuationResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardEvacuationResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StartShardEvacuationResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardEvacuationResponse) GetBody() *StartShardEvacuationResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StartShardEvacuationResponse) SetBody(v *StartShardEvacuationResponse_Body) {
+ x.Body = v
+}
+func (x *StartShardEvacuationResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StartShardEvacuationResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardEvacuationResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardEvacuationResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardEvacuationResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StartShardEvacuationResponse_Body
+ f = new(StartShardEvacuationResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetShardEvacuationStatusRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1844,18 +7204,93 @@ func (x *GetShardEvacuationStatusRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetShardEvacuationStatusRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetShardEvacuationStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetShardEvacuationStatusRequest struct {
+ Body *GetShardEvacuationStatusRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusRequest)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusRequest)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1868,27 +7303,6 @@ func (x *GetShardEvacuationStatusRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetShardEvacuationStatusRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1904,13 +7318,199 @@ func (x *GetShardEvacuationStatusRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetShardEvacuationStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetShardEvacuationStatusRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetShardEvacuationStatusRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusRequest) GetBody() *GetShardEvacuationStatusRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusRequest) SetBody(v *GetShardEvacuationStatusRequest_Body) {
+ x.Body = v
+}
+func (x *GetShardEvacuationStatusRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetShardEvacuationStatusRequest_Body
+ f = new(GetShardEvacuationStatusRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetShardEvacuationStatusResponse_Body_Status int32
+
+const (
+ GetShardEvacuationStatusResponse_Body_EVACUATE_SHARD_STATUS_UNDEFINED GetShardEvacuationStatusResponse_Body_Status = 0
+ GetShardEvacuationStatusResponse_Body_RUNNING GetShardEvacuationStatusResponse_Body_Status = 1
+ GetShardEvacuationStatusResponse_Body_COMPLETED GetShardEvacuationStatusResponse_Body_Status = 2
+)
+
+var (
+ GetShardEvacuationStatusResponse_Body_Status_name = map[int32]string{
+ 0: "EVACUATE_SHARD_STATUS_UNDEFINED",
+ 1: "RUNNING",
+ 2: "COMPLETED",
+ }
+ GetShardEvacuationStatusResponse_Body_Status_value = map[string]int32{
+ "EVACUATE_SHARD_STATUS_UNDEFINED": 0,
+ "RUNNING": 1,
+ "COMPLETED": 2,
+ }
+)
+
+func (x GetShardEvacuationStatusResponse_Body_Status) String() string {
+ if v, ok := GetShardEvacuationStatusResponse_Body_Status_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *GetShardEvacuationStatusResponse_Body_Status) FromString(s string) bool {
+ if v, ok := GetShardEvacuationStatusResponse_Body_Status_value[s]; ok {
+ *x = GetShardEvacuationStatusResponse_Body_Status(v)
+ return true
+ }
+ return false
+}
+
+type GetShardEvacuationStatusResponse_Body_UnixTimestamp struct {
+ Value int64 `json:"value"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body_UnixTimestamp)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1922,26 +7522,124 @@ func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) StableSize() (size
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.Int64Marshal(1, buf[offset:], x.Value)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Value != 0 {
+ mm.AppendInt64(1, x.Value)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body_UnixTimestamp")
+ }
+ switch fc.FieldNum {
+ case 1: // Value
+ data, ok := fc.Int64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Value")
+ }
+ x.Value = data
+ }
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) GetValue() int64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) SetValue(v int64) {
+ x.Value = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"value\":"
+ out.RawString(prefix[1:])
+ out.Int64(x.Value)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "value":
+ {
+ var f int64
+ f = in.Int64()
+ x.Value = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetShardEvacuationStatusResponse_Body_Duration struct {
+ Seconds int64 `json:"seconds"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body_Duration)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1953,26 +7651,135 @@ func (x *GetShardEvacuationStatusResponse_Body_Duration) StableSize() (size int)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetShardEvacuationStatusResponse_Body_Duration) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.Int64Marshal(1, buf[offset:], x.Seconds)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetShardEvacuationStatusResponse_Body_Duration) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Seconds != 0 {
+ mm.AppendInt64(1, x.Seconds)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body_Duration")
+ }
+ switch fc.FieldNum {
+ case 1: // Seconds
+ data, ok := fc.Int64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Seconds")
+ }
+ x.Seconds = data
+ }
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body_Duration) GetSeconds() int64 {
+ if x != nil {
+ return x.Seconds
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body_Duration) SetSeconds(v int64) {
+ x.Seconds = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"seconds\":"
+ out.RawString(prefix[1:])
+ out.Int64(x.Seconds)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "seconds":
+ {
+ var f int64
+ f = in.Int64()
+ x.Seconds = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetShardEvacuationStatusResponse_Body struct {
+ TotalObjects uint64 `json:"totalObjects"`
+ EvacuatedObjects uint64 `json:"evacuatedObjects"`
+ FailedObjects uint64 `json:"failedObjects"`
+ Shard_ID [][]byte `json:"shardID"`
+ Status GetShardEvacuationStatusResponse_Body_Status `json:"status"`
+ Duration *GetShardEvacuationStatusResponse_Body_Duration `json:"duration"`
+ StartedAt *GetShardEvacuationStatusResponse_Body_UnixTimestamp `json:"startedAt"`
+ ErrorMessage string `json:"errorMessage"`
+ SkippedObjects uint64 `json:"skippedObjects"`
+ TotalTrees uint64 `json:"totalTrees"`
+ EvacuatedTrees uint64 `json:"evacuatedTrees"`
+ FailedTrees uint64 `json:"failedTrees"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1995,37 +7802,482 @@ func (x *GetShardEvacuationStatusResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetShardEvacuationStatusResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt64Marshal(1, buf[offset:], x.TotalObjects)
- offset += proto.UInt64Marshal(2, buf[offset:], x.EvacuatedObjects)
- offset += proto.UInt64Marshal(3, buf[offset:], x.FailedObjects)
- offset += proto.RepeatedBytesMarshal(4, buf[offset:], x.Shard_ID)
- offset += proto.EnumMarshal(5, buf[offset:], int32(x.Status))
- offset += proto.NestedStructureMarshal(6, buf[offset:], x.Duration)
- offset += proto.NestedStructureMarshal(7, buf[offset:], x.StartedAt)
- offset += proto.StringMarshal(8, buf[offset:], x.ErrorMessage)
- offset += proto.UInt64Marshal(9, buf[offset:], x.SkippedObjects)
- offset += proto.UInt64Marshal(10, buf[offset:], x.TotalTrees)
- offset += proto.UInt64Marshal(11, buf[offset:], x.EvacuatedTrees)
- offset += proto.UInt64Marshal(12, buf[offset:], x.FailedTrees)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.TotalObjects != 0 {
+ mm.AppendUint64(1, x.TotalObjects)
+ }
+ if x.EvacuatedObjects != 0 {
+ mm.AppendUint64(2, x.EvacuatedObjects)
+ }
+ if x.FailedObjects != 0 {
+ mm.AppendUint64(3, x.FailedObjects)
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(4, x.Shard_ID[j])
+ }
+ if int32(x.Status) != 0 {
+ mm.AppendInt32(5, int32(x.Status))
+ }
+ if x.Duration != nil && x.Duration.StableSize() != 0 {
+ x.Duration.EmitProtobuf(mm.AppendMessage(6))
+ }
+ if x.StartedAt != nil && x.StartedAt.StableSize() != 0 {
+ x.StartedAt.EmitProtobuf(mm.AppendMessage(7))
+ }
+ if len(x.ErrorMessage) != 0 {
+ mm.AppendString(8, x.ErrorMessage)
+ }
+ if x.SkippedObjects != 0 {
+ mm.AppendUint64(9, x.SkippedObjects)
+ }
+ if x.TotalTrees != 0 {
+ mm.AppendUint64(10, x.TotalTrees)
+ }
+ if x.EvacuatedTrees != 0 {
+ mm.AppendUint64(11, x.EvacuatedTrees)
+ }
+ if x.FailedTrees != 0 {
+ mm.AppendUint64(12, x.FailedTrees)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // TotalObjects
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TotalObjects")
+ }
+ x.TotalObjects = data
+ case 2: // EvacuatedObjects
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "EvacuatedObjects")
+ }
+ x.EvacuatedObjects = data
+ case 3: // FailedObjects
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "FailedObjects")
+ }
+ x.FailedObjects = data
+ case 4: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 5: // Status
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Status")
+ }
+ x.Status = GetShardEvacuationStatusResponse_Body_Status(data)
+ case 6: // Duration
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Duration")
+ }
+ x.Duration = new(GetShardEvacuationStatusResponse_Body_Duration)
+ if err := x.Duration.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 7: // StartedAt
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "StartedAt")
+ }
+ x.StartedAt = new(GetShardEvacuationStatusResponse_Body_UnixTimestamp)
+ if err := x.StartedAt.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 8: // ErrorMessage
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ErrorMessage")
+ }
+ x.ErrorMessage = data
+ case 9: // SkippedObjects
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "SkippedObjects")
+ }
+ x.SkippedObjects = data
+ case 10: // TotalTrees
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TotalTrees")
+ }
+ x.TotalTrees = data
+ case 11: // EvacuatedTrees
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "EvacuatedTrees")
+ }
+ x.EvacuatedTrees = data
+ case 12: // FailedTrees
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "FailedTrees")
+ }
+ x.FailedTrees = data
+ }
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetTotalObjects() uint64 {
+ if x != nil {
+ return x.TotalObjects
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetTotalObjects(v uint64) {
+ x.TotalObjects = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedObjects() uint64 {
+ if x != nil {
+ return x.EvacuatedObjects
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetEvacuatedObjects(v uint64) {
+ x.EvacuatedObjects = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetFailedObjects() uint64 {
+ if x != nil {
+ return x.FailedObjects
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetFailedObjects(v uint64) {
+ x.FailedObjects = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetStatus() GetShardEvacuationStatusResponse_Body_Status {
+ if x != nil {
+ return x.Status
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetStatus(v GetShardEvacuationStatusResponse_Body_Status) {
+ x.Status = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetDuration() *GetShardEvacuationStatusResponse_Body_Duration {
+ if x != nil {
+ return x.Duration
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetDuration(v *GetShardEvacuationStatusResponse_Body_Duration) {
+ x.Duration = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetStartedAt() *GetShardEvacuationStatusResponse_Body_UnixTimestamp {
+ if x != nil {
+ return x.StartedAt
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetStartedAt(v *GetShardEvacuationStatusResponse_Body_UnixTimestamp) {
+ x.StartedAt = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
+ }
+ return ""
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetErrorMessage(v string) {
+ x.ErrorMessage = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetSkippedObjects() uint64 {
+ if x != nil {
+ return x.SkippedObjects
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetSkippedObjects(v uint64) {
+ x.SkippedObjects = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetTotalTrees() uint64 {
+ if x != nil {
+ return x.TotalTrees
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetTotalTrees(v uint64) {
+ x.TotalTrees = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetEvacuatedTrees() uint64 {
+ if x != nil {
+ return x.EvacuatedTrees
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetEvacuatedTrees(v uint64) {
+ x.EvacuatedTrees = v
+}
+func (x *GetShardEvacuationStatusResponse_Body) GetFailedTrees() uint64 {
+ if x != nil {
+ return x.FailedTrees
+ }
+ return 0
+}
+func (x *GetShardEvacuationStatusResponse_Body) SetFailedTrees(v uint64) {
+ x.FailedTrees = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"totalObjects\":"
+ out.RawString(prefix[1:])
+ out.Uint64(x.TotalObjects)
+ }
+ {
+ const prefix string = ",\"evacuatedObjects\":"
+ out.RawString(prefix)
+ out.Uint64(x.EvacuatedObjects)
+ }
+ {
+ const prefix string = ",\"failedObjects\":"
+ out.RawString(prefix)
+ out.Uint64(x.FailedObjects)
+ }
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Base64Bytes(x.Shard_ID[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"status\":"
+ out.RawString(prefix)
+ out.Int32(int32(x.Status))
+ }
+ {
+ const prefix string = ",\"duration\":"
+ out.RawString(prefix)
+ x.Duration.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"startedAt\":"
+ out.RawString(prefix)
+ x.StartedAt.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"errorMessage\":"
+ out.RawString(prefix)
+ out.String(x.ErrorMessage)
+ }
+ {
+ const prefix string = ",\"skippedObjects\":"
+ out.RawString(prefix)
+ out.Uint64(x.SkippedObjects)
+ }
+ {
+ const prefix string = ",\"totalTrees\":"
+ out.RawString(prefix)
+ out.Uint64(x.TotalTrees)
+ }
+ {
+ const prefix string = ",\"evacuatedTrees\":"
+ out.RawString(prefix)
+ out.Uint64(x.EvacuatedTrees)
+ }
+ {
+ const prefix string = ",\"failedTrees\":"
+ out.RawString(prefix)
+ out.Uint64(x.FailedTrees)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "totalObjects":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.TotalObjects = f
+ }
+ case "evacuatedObjects":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.EvacuatedObjects = f
+ }
+ case "failedObjects":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.FailedObjects = f
+ }
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Bytes()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "status":
+ {
+ var f GetShardEvacuationStatusResponse_Body_Status
+ var parsedValue GetShardEvacuationStatusResponse_Body_Status
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := GetShardEvacuationStatusResponse_Body_Status_value[v]; ok {
+ parsedValue = GetShardEvacuationStatusResponse_Body_Status(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = GetShardEvacuationStatusResponse_Body_Status(vv)
+ case float64:
+ parsedValue = GetShardEvacuationStatusResponse_Body_Status(v)
+ }
+ f = parsedValue
+ x.Status = f
+ }
+ case "duration":
+ {
+ var f *GetShardEvacuationStatusResponse_Body_Duration
+ f = new(GetShardEvacuationStatusResponse_Body_Duration)
+ f.UnmarshalEasyJSON(in)
+ x.Duration = f
+ }
+ case "startedAt":
+ {
+ var f *GetShardEvacuationStatusResponse_Body_UnixTimestamp
+ f = new(GetShardEvacuationStatusResponse_Body_UnixTimestamp)
+ f.UnmarshalEasyJSON(in)
+ x.StartedAt = f
+ }
+ case "errorMessage":
+ {
+ var f string
+ f = in.String()
+ x.ErrorMessage = f
+ }
+ case "skippedObjects":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.SkippedObjects = f
+ }
+ case "totalTrees":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.TotalTrees = f
+ }
+ case "evacuatedTrees":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.EvacuatedTrees = f
+ }
+ case "failedTrees":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.FailedTrees = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetShardEvacuationStatusResponse struct {
+ Body *GetShardEvacuationStatusResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetShardEvacuationStatusResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetShardEvacuationStatusResponse)(nil)
+ _ json.Marshaler = (*GetShardEvacuationStatusResponse)(nil)
+ _ json.Unmarshaler = (*GetShardEvacuationStatusResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2038,27 +8290,6 @@ func (x *GetShardEvacuationStatusResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetShardEvacuationStatusResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2074,13 +8305,163 @@ func (x *GetShardEvacuationStatusResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetShardEvacuationStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetShardEvacuationStatusResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetShardEvacuationStatusResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetShardEvacuationStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetShardEvacuationStatusResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetShardEvacuationStatusResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse) GetBody() *GetShardEvacuationStatusResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse) SetBody(v *GetShardEvacuationStatusResponse_Body) {
+ x.Body = v
+}
+func (x *GetShardEvacuationStatusResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetShardEvacuationStatusResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetShardEvacuationStatusResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetShardEvacuationStatusResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetShardEvacuationStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetShardEvacuationStatusResponse_Body
+ f = new(GetShardEvacuationStatusResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ResetShardEvacuationStatusRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
+ _ json.Marshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
+ _ json.Unmarshaler = (*ResetShardEvacuationStatusRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2091,18 +8472,93 @@ func (x *ResetShardEvacuationStatusRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ResetShardEvacuationStatusRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ResetShardEvacuationStatusRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ResetShardEvacuationStatusRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ResetShardEvacuationStatusRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ResetShardEvacuationStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ResetShardEvacuationStatusRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ResetShardEvacuationStatusRequest struct {
+ Body *ResetShardEvacuationStatusRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusRequest)(nil)
+ _ json.Marshaler = (*ResetShardEvacuationStatusRequest)(nil)
+ _ json.Unmarshaler = (*ResetShardEvacuationStatusRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2115,27 +8571,6 @@ func (x *ResetShardEvacuationStatusRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ResetShardEvacuationStatusRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2151,13 +8586,163 @@ func (x *ResetShardEvacuationStatusRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ResetShardEvacuationStatusRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ResetShardEvacuationStatusRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ResetShardEvacuationStatusRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ResetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ResetShardEvacuationStatusRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ResetShardEvacuationStatusRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusRequest) GetBody() *ResetShardEvacuationStatusRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusRequest) SetBody(v *ResetShardEvacuationStatusRequest_Body) {
+ x.Body = v
+}
+func (x *ResetShardEvacuationStatusRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ResetShardEvacuationStatusRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ResetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ResetShardEvacuationStatusRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ResetShardEvacuationStatusRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ResetShardEvacuationStatusRequest_Body
+ f = new(ResetShardEvacuationStatusRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ResetShardEvacuationStatusResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
+ _ json.Marshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
+ _ json.Unmarshaler = (*ResetShardEvacuationStatusResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2168,18 +8753,93 @@ func (x *ResetShardEvacuationStatusResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ResetShardEvacuationStatusResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ResetShardEvacuationStatusResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ResetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ResetShardEvacuationStatusResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ResetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ResetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ResetShardEvacuationStatusResponse struct {
+ Body *ResetShardEvacuationStatusResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ResetShardEvacuationStatusResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ResetShardEvacuationStatusResponse)(nil)
+ _ json.Marshaler = (*ResetShardEvacuationStatusResponse)(nil)
+ _ json.Unmarshaler = (*ResetShardEvacuationStatusResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2192,27 +8852,6 @@ func (x *ResetShardEvacuationStatusResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ResetShardEvacuationStatusResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2228,13 +8867,163 @@ func (x *ResetShardEvacuationStatusResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ResetShardEvacuationStatusResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ResetShardEvacuationStatusResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ResetShardEvacuationStatusResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ResetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ResetShardEvacuationStatusResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ResetShardEvacuationStatusResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ResetShardEvacuationStatusResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusResponse) GetBody() *ResetShardEvacuationStatusResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusResponse) SetBody(v *ResetShardEvacuationStatusResponse_Body) {
+ x.Body = v
+}
+func (x *ResetShardEvacuationStatusResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ResetShardEvacuationStatusResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ResetShardEvacuationStatusResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ResetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ResetShardEvacuationStatusResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ResetShardEvacuationStatusResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ResetShardEvacuationStatusResponse_Body
+ f = new(ResetShardEvacuationStatusResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StopShardEvacuationRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StopShardEvacuationRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StopShardEvacuationRequest_Body)(nil)
+ _ json.Marshaler = (*StopShardEvacuationRequest_Body)(nil)
+ _ json.Unmarshaler = (*StopShardEvacuationRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2245,18 +9034,93 @@ func (x *StopShardEvacuationRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *StopShardEvacuationRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StopShardEvacuationRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *StopShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StopShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StopShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StopShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StopShardEvacuationRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StopShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StopShardEvacuationRequest struct {
+ Body *StopShardEvacuationRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StopShardEvacuationRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*StopShardEvacuationRequest)(nil)
+ _ json.Marshaler = (*StopShardEvacuationRequest)(nil)
+ _ json.Unmarshaler = (*StopShardEvacuationRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2269,27 +9133,6 @@ func (x *StopShardEvacuationRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *StopShardEvacuationRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2305,13 +9148,163 @@ func (x *StopShardEvacuationRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *StopShardEvacuationRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *StopShardEvacuationRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StopShardEvacuationRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *StopShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StopShardEvacuationRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StopShardEvacuationRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StopShardEvacuationRequest) GetBody() *StopShardEvacuationRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StopShardEvacuationRequest) SetBody(v *StopShardEvacuationRequest_Body) {
+ x.Body = v
+}
+func (x *StopShardEvacuationRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StopShardEvacuationRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StopShardEvacuationRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StopShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StopShardEvacuationRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StopShardEvacuationRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StopShardEvacuationRequest_Body
+ f = new(StopShardEvacuationRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StopShardEvacuationResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StopShardEvacuationResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StopShardEvacuationResponse_Body)(nil)
+ _ json.Marshaler = (*StopShardEvacuationResponse_Body)(nil)
+ _ json.Unmarshaler = (*StopShardEvacuationResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2322,18 +9315,93 @@ func (x *StopShardEvacuationResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *StopShardEvacuationResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StopShardEvacuationResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *StopShardEvacuationResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StopShardEvacuationResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StopShardEvacuationResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StopShardEvacuationResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StopShardEvacuationResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StopShardEvacuationResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StopShardEvacuationResponse struct {
+ Body *StopShardEvacuationResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StopShardEvacuationResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*StopShardEvacuationResponse)(nil)
+ _ json.Marshaler = (*StopShardEvacuationResponse)(nil)
+ _ json.Unmarshaler = (*StopShardEvacuationResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2346,27 +9414,6 @@ func (x *StopShardEvacuationResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *StopShardEvacuationResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2382,13 +9429,165 @@ func (x *StopShardEvacuationResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *StopShardEvacuationResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *StopShardEvacuationResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StopShardEvacuationResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *StopShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StopShardEvacuationResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StopShardEvacuationResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StopShardEvacuationResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StopShardEvacuationResponse) GetBody() *StopShardEvacuationResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StopShardEvacuationResponse) SetBody(v *StopShardEvacuationResponse_Body) {
+ x.Body = v
+}
+func (x *StopShardEvacuationResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StopShardEvacuationResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StopShardEvacuationResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StopShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StopShardEvacuationResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StopShardEvacuationResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StopShardEvacuationResponse_Body
+ f = new(StopShardEvacuationResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddChainLocalOverrideRequest_Body struct {
+ Target *ChainTarget `json:"target"`
+ Chain []byte `json:"chain"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddChainLocalOverrideRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideRequest_Body)(nil)
+ _ json.Marshaler = (*AddChainLocalOverrideRequest_Body)(nil)
+ _ json.Unmarshaler = (*AddChainLocalOverrideRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2401,27 +9600,158 @@ func (x *AddChainLocalOverrideRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddChainLocalOverrideRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Target)
- offset += proto.BytesMarshal(2, buf[offset:], x.Chain)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Target != nil && x.Target.StableSize() != 0 {
+ x.Target.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if len(x.Chain) != 0 {
+ mm.AppendBytes(2, x.Chain)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Target
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Target")
+ }
+ x.Target = new(ChainTarget)
+ if err := x.Target.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Chain
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Chain")
+ }
+ x.Chain = data
+ }
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) {
+ x.Target = v
+}
+func (x *AddChainLocalOverrideRequest_Body) GetChain() []byte {
+ if x != nil {
+ return x.Chain
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest_Body) SetChain(v []byte) {
+ x.Chain = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"target\":"
+ out.RawString(prefix[1:])
+ x.Target.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"chain\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.Chain)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "target":
+ {
+ var f *ChainTarget
+ f = new(ChainTarget)
+ f.UnmarshalEasyJSON(in)
+ x.Target = f
+ }
+ case "chain":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Chain = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddChainLocalOverrideRequest struct {
+ Body *AddChainLocalOverrideRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddChainLocalOverrideRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideRequest)(nil)
+ _ json.Marshaler = (*AddChainLocalOverrideRequest)(nil)
+ _ json.Unmarshaler = (*AddChainLocalOverrideRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2434,27 +9764,6 @@ func (x *AddChainLocalOverrideRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddChainLocalOverrideRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2470,13 +9779,164 @@ func (x *AddChainLocalOverrideRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *AddChainLocalOverrideRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(AddChainLocalOverrideRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest) GetBody() *AddChainLocalOverrideRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest) SetBody(v *AddChainLocalOverrideRequest_Body) {
+ x.Body = v
+}
+func (x *AddChainLocalOverrideRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddChainLocalOverrideRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddChainLocalOverrideRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddChainLocalOverrideRequest_Body
+ f = new(AddChainLocalOverrideRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddChainLocalOverrideResponse_Body struct {
+ ChainId []byte `json:"chainId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddChainLocalOverrideResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideResponse_Body)(nil)
+ _ json.Marshaler = (*AddChainLocalOverrideResponse_Body)(nil)
+ _ json.Unmarshaler = (*AddChainLocalOverrideResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2488,26 +9948,125 @@ func (x *AddChainLocalOverrideResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddChainLocalOverrideResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ChainId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ChainId) != 0 {
+ mm.AppendBytes(1, x.ChainId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ChainId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ChainId")
+ }
+ x.ChainId = data
+ }
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideResponse_Body) GetChainId() []byte {
+ if x != nil {
+ return x.ChainId
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideResponse_Body) SetChainId(v []byte) {
+ x.ChainId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"chainId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ChainId)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "chainId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ChainId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddChainLocalOverrideResponse struct {
+ Body *AddChainLocalOverrideResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddChainLocalOverrideResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddChainLocalOverrideResponse)(nil)
+ _ json.Marshaler = (*AddChainLocalOverrideResponse)(nil)
+ _ json.Unmarshaler = (*AddChainLocalOverrideResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2520,27 +10079,6 @@ func (x *AddChainLocalOverrideResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddChainLocalOverrideResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2556,13 +10094,165 @@ func (x *AddChainLocalOverrideResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *AddChainLocalOverrideResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddChainLocalOverrideResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(AddChainLocalOverrideResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideResponse) GetBody() *AddChainLocalOverrideResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideResponse) SetBody(v *AddChainLocalOverrideResponse_Body) {
+ x.Body = v
+}
+func (x *AddChainLocalOverrideResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddChainLocalOverrideResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddChainLocalOverrideResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddChainLocalOverrideResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddChainLocalOverrideResponse_Body
+ f = new(AddChainLocalOverrideResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetChainLocalOverrideRequest_Body struct {
+ Target *ChainTarget `json:"target"`
+ ChainId []byte `json:"chainId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetChainLocalOverrideRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideRequest_Body)(nil)
+ _ json.Marshaler = (*GetChainLocalOverrideRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetChainLocalOverrideRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2575,27 +10265,158 @@ func (x *GetChainLocalOverrideRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetChainLocalOverrideRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Target)
- offset += proto.BytesMarshal(2, buf[offset:], x.ChainId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Target != nil && x.Target.StableSize() != 0 {
+ x.Target.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if len(x.ChainId) != 0 {
+ mm.AppendBytes(2, x.ChainId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Target
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Target")
+ }
+ x.Target = new(ChainTarget)
+ if err := x.Target.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // ChainId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ChainId")
+ }
+ x.ChainId = data
+ }
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) {
+ x.Target = v
+}
+func (x *GetChainLocalOverrideRequest_Body) GetChainId() []byte {
+ if x != nil {
+ return x.ChainId
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest_Body) SetChainId(v []byte) {
+ x.ChainId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"target\":"
+ out.RawString(prefix[1:])
+ x.Target.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"chainId\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.ChainId)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "target":
+ {
+ var f *ChainTarget
+ f = new(ChainTarget)
+ f.UnmarshalEasyJSON(in)
+ x.Target = f
+ }
+ case "chainId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ChainId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetChainLocalOverrideRequest struct {
+ Body *GetChainLocalOverrideRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetChainLocalOverrideRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideRequest)(nil)
+ _ json.Marshaler = (*GetChainLocalOverrideRequest)(nil)
+ _ json.Unmarshaler = (*GetChainLocalOverrideRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2608,27 +10429,6 @@ func (x *GetChainLocalOverrideRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetChainLocalOverrideRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2644,13 +10444,164 @@ func (x *GetChainLocalOverrideRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetChainLocalOverrideRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetChainLocalOverrideRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest) GetBody() *GetChainLocalOverrideRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest) SetBody(v *GetChainLocalOverrideRequest_Body) {
+ x.Body = v
+}
+func (x *GetChainLocalOverrideRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetChainLocalOverrideRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetChainLocalOverrideRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetChainLocalOverrideRequest_Body
+ f = new(GetChainLocalOverrideRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetChainLocalOverrideResponse_Body struct {
+ Chain []byte `json:"chain"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetChainLocalOverrideResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideResponse_Body)(nil)
+ _ json.Marshaler = (*GetChainLocalOverrideResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetChainLocalOverrideResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2662,26 +10613,125 @@ func (x *GetChainLocalOverrideResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetChainLocalOverrideResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Chain)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Chain) != 0 {
+ mm.AppendBytes(1, x.Chain)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Chain
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Chain")
+ }
+ x.Chain = data
+ }
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideResponse_Body) GetChain() []byte {
+ if x != nil {
+ return x.Chain
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideResponse_Body) SetChain(v []byte) {
+ x.Chain = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"chain\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Chain)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "chain":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Chain = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetChainLocalOverrideResponse struct {
+ Body *GetChainLocalOverrideResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetChainLocalOverrideResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetChainLocalOverrideResponse)(nil)
+ _ json.Marshaler = (*GetChainLocalOverrideResponse)(nil)
+ _ json.Unmarshaler = (*GetChainLocalOverrideResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2694,27 +10744,6 @@ func (x *GetChainLocalOverrideResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetChainLocalOverrideResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2730,13 +10759,164 @@ func (x *GetChainLocalOverrideResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetChainLocalOverrideResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetChainLocalOverrideResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetChainLocalOverrideResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideResponse) GetBody() *GetChainLocalOverrideResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideResponse) SetBody(v *GetChainLocalOverrideResponse_Body) {
+ x.Body = v
+}
+func (x *GetChainLocalOverrideResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetChainLocalOverrideResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetChainLocalOverrideResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetChainLocalOverrideResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetChainLocalOverrideResponse_Body
+ f = new(GetChainLocalOverrideResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListChainLocalOverridesRequest_Body struct {
+ Target *ChainTarget `json:"target"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListChainLocalOverridesRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesRequest_Body)(nil)
+ _ json.Marshaler = (*ListChainLocalOverridesRequest_Body)(nil)
+ _ json.Unmarshaler = (*ListChainLocalOverridesRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2748,26 +10928,129 @@ func (x *ListChainLocalOverridesRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListChainLocalOverridesRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Target)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListChainLocalOverridesRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListChainLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Target != nil && x.Target.StableSize() != 0 {
+ x.Target.EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListChainLocalOverridesRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Target
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Target")
+ }
+ x.Target = new(ChainTarget)
+ if err := x.Target.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesRequest_Body) GetTarget() *ChainTarget {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesRequest_Body) SetTarget(v *ChainTarget) {
+ x.Target = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListChainLocalOverridesRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListChainLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"target\":"
+ out.RawString(prefix[1:])
+ x.Target.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListChainLocalOverridesRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListChainLocalOverridesRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "target":
+ {
+ var f *ChainTarget
+ f = new(ChainTarget)
+ f.UnmarshalEasyJSON(in)
+ x.Target = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListChainLocalOverridesRequest struct {
+ Body *ListChainLocalOverridesRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListChainLocalOverridesRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesRequest)(nil)
+ _ json.Marshaler = (*ListChainLocalOverridesRequest)(nil)
+ _ json.Unmarshaler = (*ListChainLocalOverridesRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2780,27 +11063,6 @@ func (x *ListChainLocalOverridesRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListChainLocalOverridesRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2816,13 +11078,164 @@ func (x *ListChainLocalOverridesRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ListChainLocalOverridesRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ListChainLocalOverridesRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListChainLocalOverridesRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListChainLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListChainLocalOverridesRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListChainLocalOverridesRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesRequest) GetBody() *ListChainLocalOverridesRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesRequest) SetBody(v *ListChainLocalOverridesRequest_Body) {
+ x.Body = v
+}
+func (x *ListChainLocalOverridesRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListChainLocalOverridesRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListChainLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListChainLocalOverridesRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListChainLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListChainLocalOverridesRequest_Body
+ f = new(ListChainLocalOverridesRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListChainLocalOverridesResponse_Body struct {
+ Chains [][]byte `json:"chains"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListChainLocalOverridesResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesResponse_Body)(nil)
+ _ json.Marshaler = (*ListChainLocalOverridesResponse_Body)(nil)
+ _ json.Unmarshaler = (*ListChainLocalOverridesResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2834,26 +11247,139 @@ func (x *ListChainLocalOverridesResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListChainLocalOverridesResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Chains)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListChainLocalOverridesResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListChainLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Chains {
+ mm.AppendBytes(1, x.Chains[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListChainLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Chains
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Chains")
+ }
+ x.Chains = append(x.Chains, data)
+ }
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesResponse_Body) GetChains() [][]byte {
+ if x != nil {
+ return x.Chains
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesResponse_Body) SetChains(v [][]byte) {
+ x.Chains = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListChainLocalOverridesResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListChainLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"chains\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Chains {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Base64Bytes(x.Chains[i])
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListChainLocalOverridesResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListChainLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "chains":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Bytes()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Chains = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListChainLocalOverridesResponse struct {
+ Body *ListChainLocalOverridesResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListChainLocalOverridesResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListChainLocalOverridesResponse)(nil)
+ _ json.Marshaler = (*ListChainLocalOverridesResponse)(nil)
+ _ json.Unmarshaler = (*ListChainLocalOverridesResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2866,27 +11392,6 @@ func (x *ListChainLocalOverridesResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListChainLocalOverridesResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2902,13 +11407,164 @@ func (x *ListChainLocalOverridesResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ListChainLocalOverridesResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ListChainLocalOverridesResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListChainLocalOverridesResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListChainLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListChainLocalOverridesResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListChainLocalOverridesResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListChainLocalOverridesResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesResponse) GetBody() *ListChainLocalOverridesResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesResponse) SetBody(v *ListChainLocalOverridesResponse_Body) {
+ x.Body = v
+}
+func (x *ListChainLocalOverridesResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListChainLocalOverridesResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListChainLocalOverridesResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListChainLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListChainLocalOverridesResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListChainLocalOverridesResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListChainLocalOverridesResponse_Body
+ f = new(ListChainLocalOverridesResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListTargetsLocalOverridesRequest_Body struct {
+ ChainName string `json:"chainName"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
+ _ json.Marshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
+ _ json.Unmarshaler = (*ListTargetsLocalOverridesRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2920,26 +11576,125 @@ func (x *ListTargetsLocalOverridesRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListTargetsLocalOverridesRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.StringMarshal(1, buf[offset:], x.ChainName)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListTargetsLocalOverridesRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListTargetsLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ChainName) != 0 {
+ mm.AppendString(1, x.ChainName)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ChainName
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ChainName")
+ }
+ x.ChainName = data
+ }
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesRequest_Body) GetChainName() string {
+ if x != nil {
+ return x.ChainName
+ }
+ return ""
+}
+func (x *ListTargetsLocalOverridesRequest_Body) SetChainName(v string) {
+ x.ChainName = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListTargetsLocalOverridesRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListTargetsLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"chainName\":"
+ out.RawString(prefix[1:])
+ out.String(x.ChainName)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListTargetsLocalOverridesRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "chainName":
+ {
+ var f string
+ f = in.String()
+ x.ChainName = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListTargetsLocalOverridesRequest struct {
+ Body *ListTargetsLocalOverridesRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesRequest)(nil)
+ _ json.Marshaler = (*ListTargetsLocalOverridesRequest)(nil)
+ _ json.Unmarshaler = (*ListTargetsLocalOverridesRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -2952,27 +11707,6 @@ func (x *ListTargetsLocalOverridesRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListTargetsLocalOverridesRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -2988,13 +11722,164 @@ func (x *ListTargetsLocalOverridesRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ListTargetsLocalOverridesRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ListTargetsLocalOverridesRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListTargetsLocalOverridesRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListTargetsLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListTargetsLocalOverridesRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListTargetsLocalOverridesRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesRequest) GetBody() *ListTargetsLocalOverridesRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesRequest) SetBody(v *ListTargetsLocalOverridesRequest_Body) {
+ x.Body = v
+}
+func (x *ListTargetsLocalOverridesRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListTargetsLocalOverridesRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListTargetsLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListTargetsLocalOverridesRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListTargetsLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListTargetsLocalOverridesRequest_Body
+ f = new(ListTargetsLocalOverridesRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListTargetsLocalOverridesResponse_Body struct {
+ Targets []*ChainTarget `json:"targets"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
+ _ json.Marshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
+ _ json.Unmarshaler = (*ListTargetsLocalOverridesResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3008,28 +11893,146 @@ func (x *ListTargetsLocalOverridesResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListTargetsLocalOverridesResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- for i := range x.Targets {
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Targets[i])
- }
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListTargetsLocalOverridesResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListTargetsLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for i := range x.Targets {
+ if x.Targets[i] != nil && x.Targets[i].StableSize() != 0 {
+ x.Targets[i].EmitProtobuf(mm.AppendMessage(1))
+ }
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Targets
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Targets")
+ }
+ x.Targets = append(x.Targets, new(ChainTarget))
+ ff := x.Targets[len(x.Targets)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesResponse_Body) GetTargets() []*ChainTarget {
+ if x != nil {
+ return x.Targets
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesResponse_Body) SetTargets(v []*ChainTarget) {
+ x.Targets = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListTargetsLocalOverridesResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListTargetsLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"targets\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Targets {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Targets[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "targets":
+ {
+ var f *ChainTarget
+ var list []*ChainTarget
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(ChainTarget)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Targets = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ListTargetsLocalOverridesResponse struct {
+ Body *ListTargetsLocalOverridesResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ListTargetsLocalOverridesResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ListTargetsLocalOverridesResponse)(nil)
+ _ json.Marshaler = (*ListTargetsLocalOverridesResponse)(nil)
+ _ json.Unmarshaler = (*ListTargetsLocalOverridesResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3042,27 +12045,6 @@ func (x *ListTargetsLocalOverridesResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ListTargetsLocalOverridesResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3078,13 +12060,165 @@ func (x *ListTargetsLocalOverridesResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ListTargetsLocalOverridesResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ListTargetsLocalOverridesResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ListTargetsLocalOverridesResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ListTargetsLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ListTargetsLocalOverridesResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ListTargetsLocalOverridesResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ListTargetsLocalOverridesResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesResponse) GetBody() *ListTargetsLocalOverridesResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesResponse) SetBody(v *ListTargetsLocalOverridesResponse_Body) {
+ x.Body = v
+}
+func (x *ListTargetsLocalOverridesResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ListTargetsLocalOverridesResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ListTargetsLocalOverridesResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ListTargetsLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ListTargetsLocalOverridesResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ListTargetsLocalOverridesResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ListTargetsLocalOverridesResponse_Body
+ f = new(ListTargetsLocalOverridesResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverrideRequest_Body struct {
+ Target *ChainTarget `json:"target"`
+ ChainId []byte `json:"chainId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverrideRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3097,27 +12231,158 @@ func (x *RemoveChainLocalOverrideRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveChainLocalOverrideRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Target)
- offset += proto.BytesMarshal(2, buf[offset:], x.ChainId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverrideRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Target != nil && x.Target.StableSize() != 0 {
+ x.Target.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if len(x.ChainId) != 0 {
+ mm.AppendBytes(2, x.ChainId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Target
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Target")
+ }
+ x.Target = new(ChainTarget)
+ if err := x.Target.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // ChainId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ChainId")
+ }
+ x.ChainId = data
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest_Body) GetTarget() *ChainTarget {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest_Body) SetTarget(v *ChainTarget) {
+ x.Target = v
+}
+func (x *RemoveChainLocalOverrideRequest_Body) GetChainId() []byte {
+ if x != nil {
+ return x.ChainId
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest_Body) SetChainId(v []byte) {
+ x.ChainId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverrideRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"target\":"
+ out.RawString(prefix[1:])
+ x.Target.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"chainId\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.ChainId)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "target":
+ {
+ var f *ChainTarget
+ f = new(ChainTarget)
+ f.UnmarshalEasyJSON(in)
+ x.Target = f
+ }
+ case "chainId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ChainId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverrideRequest struct {
+ Body *RemoveChainLocalOverrideRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideRequest)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverrideRequest)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverrideRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3130,27 +12395,6 @@ func (x *RemoveChainLocalOverrideRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveChainLocalOverrideRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3166,13 +12410,163 @@ func (x *RemoveChainLocalOverrideRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveChainLocalOverrideRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *RemoveChainLocalOverrideRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverrideRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverrideRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveChainLocalOverrideRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest) GetBody() *RemoveChainLocalOverrideRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest) SetBody(v *RemoveChainLocalOverrideRequest_Body) {
+ x.Body = v
+}
+func (x *RemoveChainLocalOverrideRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverrideRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverrideRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverrideRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveChainLocalOverrideRequest_Body
+ f = new(RemoveChainLocalOverrideRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverrideResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverrideResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3183,18 +12577,93 @@ func (x *RemoveChainLocalOverrideResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveChainLocalOverrideResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverrideResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveChainLocalOverrideResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverrideResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverrideResponse struct {
+ Body *RemoveChainLocalOverrideResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverrideResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverrideResponse)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverrideResponse)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverrideResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3207,27 +12676,6 @@ func (x *RemoveChainLocalOverrideResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveChainLocalOverrideResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3243,13 +12691,164 @@ func (x *RemoveChainLocalOverrideResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveChainLocalOverrideResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *RemoveChainLocalOverrideResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverrideResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverrideResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverrideResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveChainLocalOverrideResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideResponse) GetBody() *RemoveChainLocalOverrideResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideResponse) SetBody(v *RemoveChainLocalOverrideResponse_Body) {
+ x.Body = v
+}
+func (x *RemoveChainLocalOverrideResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverrideResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverrideResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverrideResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverrideResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveChainLocalOverrideResponse_Body
+ f = new(RemoveChainLocalOverrideResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverridesByTargetRequest_Body struct {
+ Target *ChainTarget `json:"target"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3261,26 +12860,129 @@ func (x *RemoveChainLocalOverridesByTargetRequest_Body) StableSize() (size int)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveChainLocalOverridesByTargetRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Target)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Target != nil && x.Target.StableSize() != 0 {
+ x.Target.EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Target
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Target")
+ }
+ x.Target = new(ChainTarget)
+ if err := x.Target.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) GetTarget() *ChainTarget {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) SetTarget(v *ChainTarget) {
+ x.Target = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"target\":"
+ out.RawString(prefix[1:])
+ x.Target.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverridesByTargetRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "target":
+ {
+ var f *ChainTarget
+ f = new(ChainTarget)
+ f.UnmarshalEasyJSON(in)
+ x.Target = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverridesByTargetRequest struct {
+ Body *RemoveChainLocalOverridesByTargetRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3293,27 +12995,6 @@ func (x *RemoveChainLocalOverridesByTargetRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveChainLocalOverridesByTargetRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3329,13 +13010,163 @@ func (x *RemoveChainLocalOverridesByTargetRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveChainLocalOverridesByTargetRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *RemoveChainLocalOverridesByTargetRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveChainLocalOverridesByTargetRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveChainLocalOverridesByTargetRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) GetBody() *RemoveChainLocalOverridesByTargetRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) SetBody(v *RemoveChainLocalOverridesByTargetRequest_Body) {
+ x.Body = v
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverridesByTargetRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveChainLocalOverridesByTargetRequest_Body
+ f = new(RemoveChainLocalOverridesByTargetRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverridesByTargetResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3346,18 +13177,93 @@ func (x *RemoveChainLocalOverridesByTargetResponse_Body) StableSize() (size int)
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveChainLocalOverridesByTargetResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverridesByTargetResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveChainLocalOverridesByTargetResponse struct {
+ Body *RemoveChainLocalOverridesByTargetResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
+ _ json.Marshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
+ _ json.Unmarshaler = (*RemoveChainLocalOverridesByTargetResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3370,27 +13276,6 @@ func (x *RemoveChainLocalOverridesByTargetResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveChainLocalOverridesByTargetResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3406,13 +13291,168 @@ func (x *RemoveChainLocalOverridesByTargetResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveChainLocalOverridesByTargetResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *RemoveChainLocalOverridesByTargetResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveChainLocalOverridesByTargetResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveChainLocalOverridesByTargetResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveChainLocalOverridesByTargetResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) GetBody() *RemoveChainLocalOverridesByTargetResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) SetBody(v *RemoveChainLocalOverridesByTargetResponse_Body) {
+ x.Body = v
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveChainLocalOverridesByTargetResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveChainLocalOverridesByTargetResponse_Body
+ f = new(RemoveChainLocalOverridesByTargetResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SealWriteCacheRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ IgnoreErrors bool `json:"ignoreErrors"`
+ Async bool `json:"async"`
+ RestoreMode bool `json:"restoreMode"`
+ Shrink bool `json:"shrink"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SealWriteCacheRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SealWriteCacheRequest_Body)(nil)
+ _ json.Marshaler = (*SealWriteCacheRequest_Body)(nil)
+ _ json.Unmarshaler = (*SealWriteCacheRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3428,30 +13468,255 @@ func (x *SealWriteCacheRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SealWriteCacheRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.BoolMarshal(2, buf[offset:], x.IgnoreErrors)
- offset += proto.BoolMarshal(3, buf[offset:], x.Async)
- offset += proto.BoolMarshal(4, buf[offset:], x.RestoreMode)
- offset += proto.BoolMarshal(5, buf[offset:], x.Shrink)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SealWriteCacheRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SealWriteCacheRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if x.IgnoreErrors {
+ mm.AppendBool(2, x.IgnoreErrors)
+ }
+ if x.Async {
+ mm.AppendBool(3, x.Async)
+ }
+ if x.RestoreMode {
+ mm.AppendBool(4, x.RestoreMode)
+ }
+ if x.Shrink {
+ mm.AppendBool(5, x.Shrink)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SealWriteCacheRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SealWriteCacheRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // IgnoreErrors
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "IgnoreErrors")
+ }
+ x.IgnoreErrors = data
+ case 3: // Async
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Async")
+ }
+ x.Async = data
+ case 4: // RestoreMode
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "RestoreMode")
+ }
+ x.RestoreMode = data
+ case 5: // Shrink
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shrink")
+ }
+ x.Shrink = data
+ }
+ }
+ return nil
+}
+func (x *SealWriteCacheRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *SealWriteCacheRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *SealWriteCacheRequest_Body) GetIgnoreErrors() bool {
+ if x != nil {
+ return x.IgnoreErrors
+ }
+ return false
+}
+func (x *SealWriteCacheRequest_Body) SetIgnoreErrors(v bool) {
+ x.IgnoreErrors = v
+}
+func (x *SealWriteCacheRequest_Body) GetAsync() bool {
+ if x != nil {
+ return x.Async
+ }
+ return false
+}
+func (x *SealWriteCacheRequest_Body) SetAsync(v bool) {
+ x.Async = v
+}
+func (x *SealWriteCacheRequest_Body) GetRestoreMode() bool {
+ if x != nil {
+ return x.RestoreMode
+ }
+ return false
+}
+func (x *SealWriteCacheRequest_Body) SetRestoreMode(v bool) {
+ x.RestoreMode = v
+}
+func (x *SealWriteCacheRequest_Body) GetShrink() bool {
+ if x != nil {
+ return x.Shrink
+ }
+ return false
+}
+func (x *SealWriteCacheRequest_Body) SetShrink(v bool) {
+ x.Shrink = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SealWriteCacheRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SealWriteCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Base64Bytes(x.Shard_ID[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"ignoreErrors\":"
+ out.RawString(prefix)
+ out.Bool(x.IgnoreErrors)
+ }
+ {
+ const prefix string = ",\"async\":"
+ out.RawString(prefix)
+ out.Bool(x.Async)
+ }
+ {
+ const prefix string = ",\"restoreMode\":"
+ out.RawString(prefix)
+ out.Bool(x.RestoreMode)
+ }
+ {
+ const prefix string = ",\"shrink\":"
+ out.RawString(prefix)
+ out.Bool(x.Shrink)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SealWriteCacheRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SealWriteCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Bytes()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "ignoreErrors":
+ {
+ var f bool
+ f = in.Bool()
+ x.IgnoreErrors = f
+ }
+ case "async":
+ {
+ var f bool
+ f = in.Bool()
+ x.Async = f
+ }
+ case "restoreMode":
+ {
+ var f bool
+ f = in.Bool()
+ x.RestoreMode = f
+ }
+ case "shrink":
+ {
+ var f bool
+ f = in.Bool()
+ x.Shrink = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SealWriteCacheRequest struct {
+ Body *SealWriteCacheRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SealWriteCacheRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*SealWriteCacheRequest)(nil)
+ _ json.Marshaler = (*SealWriteCacheRequest)(nil)
+ _ json.Unmarshaler = (*SealWriteCacheRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3464,27 +13729,6 @@ func (x *SealWriteCacheRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SealWriteCacheRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3500,13 +13744,166 @@ func (x *SealWriteCacheRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SealWriteCacheRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SealWriteCacheRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SealWriteCacheRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SealWriteCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SealWriteCacheRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SealWriteCacheRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SealWriteCacheRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SealWriteCacheRequest) GetBody() *SealWriteCacheRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SealWriteCacheRequest) SetBody(v *SealWriteCacheRequest_Body) {
+ x.Body = v
+}
+func (x *SealWriteCacheRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SealWriteCacheRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SealWriteCacheRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SealWriteCacheRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SealWriteCacheRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SealWriteCacheRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SealWriteCacheRequest_Body
+ f = new(SealWriteCacheRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SealWriteCacheResponse_Body_Status struct {
+ Shard_ID []byte `json:"shardID"`
+ Success bool `json:"success"`
+ Error string `json:"error"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SealWriteCacheResponse_Body_Status)(nil)
+ _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse_Body_Status)(nil)
+ _ json.Marshaler = (*SealWriteCacheResponse_Body_Status)(nil)
+ _ json.Unmarshaler = (*SealWriteCacheResponse_Body_Status)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3520,28 +13917,182 @@ func (x *SealWriteCacheResponse_Body_Status) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SealWriteCacheResponse_Body_Status) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.BoolMarshal(2, buf[offset:], x.Success)
- offset += proto.StringMarshal(3, buf[offset:], x.Error)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SealWriteCacheResponse_Body_Status) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SealWriteCacheResponse_Body_Status) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Shard_ID) != 0 {
+ mm.AppendBytes(1, x.Shard_ID)
+ }
+ if x.Success {
+ mm.AppendBool(2, x.Success)
+ }
+ if len(x.Error) != 0 {
+ mm.AppendString(3, x.Error)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SealWriteCacheResponse_Body_Status) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse_Body_Status")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = data
+ case 2: // Success
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Success")
+ }
+ x.Success = data
+ case 3: // Error
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Error")
+ }
+ x.Error = data
+ }
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse_Body_Status) GetShard_ID() []byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse_Body_Status) SetShard_ID(v []byte) {
+ x.Shard_ID = v
+}
+func (x *SealWriteCacheResponse_Body_Status) GetSuccess() bool {
+ if x != nil {
+ return x.Success
+ }
+ return false
+}
+func (x *SealWriteCacheResponse_Body_Status) SetSuccess(v bool) {
+ x.Success = v
+}
+func (x *SealWriteCacheResponse_Body_Status) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+func (x *SealWriteCacheResponse_Body_Status) SetError(v string) {
+ x.Error = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SealWriteCacheResponse_Body_Status) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SealWriteCacheResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Shard_ID)
+ }
+ {
+ const prefix string = ",\"success\":"
+ out.RawString(prefix)
+ out.Bool(x.Success)
+ }
+ {
+ const prefix string = ",\"error\":"
+ out.RawString(prefix)
+ out.String(x.Error)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SealWriteCacheResponse_Body_Status) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SealWriteCacheResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Shard_ID = f
+ }
+ case "success":
+ {
+ var f bool
+ f = in.Bool()
+ x.Success = f
+ }
+ case "error":
+ {
+ var f string
+ f = in.String()
+ x.Error = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SealWriteCacheResponse_Body struct {
+ Results []*SealWriteCacheResponse_Body_Status `json:"results"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SealWriteCacheResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse_Body)(nil)
+ _ json.Marshaler = (*SealWriteCacheResponse_Body)(nil)
+ _ json.Unmarshaler = (*SealWriteCacheResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3555,28 +14106,146 @@ func (x *SealWriteCacheResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SealWriteCacheResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- for i := range x.Results {
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Results[i])
- }
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SealWriteCacheResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SealWriteCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for i := range x.Results {
+ if x.Results[i] != nil && x.Results[i].StableSize() != 0 {
+ x.Results[i].EmitProtobuf(mm.AppendMessage(1))
+ }
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SealWriteCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Results
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Results")
+ }
+ x.Results = append(x.Results, new(SealWriteCacheResponse_Body_Status))
+ ff := x.Results[len(x.Results)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse_Body) GetResults() []*SealWriteCacheResponse_Body_Status {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse_Body) SetResults(v []*SealWriteCacheResponse_Body_Status) {
+ x.Results = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SealWriteCacheResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SealWriteCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"results\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Results {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Results[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SealWriteCacheResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SealWriteCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "results":
+ {
+ var f *SealWriteCacheResponse_Body_Status
+ var list []*SealWriteCacheResponse_Body_Status
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(SealWriteCacheResponse_Body_Status)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Results = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type SealWriteCacheResponse struct {
+ Body *SealWriteCacheResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*SealWriteCacheResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*SealWriteCacheResponse)(nil)
+ _ json.Marshaler = (*SealWriteCacheResponse)(nil)
+ _ json.Unmarshaler = (*SealWriteCacheResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3589,27 +14258,6 @@ func (x *SealWriteCacheResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *SealWriteCacheResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3625,13 +14273,164 @@ func (x *SealWriteCacheResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *SealWriteCacheResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *SealWriteCacheResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *SealWriteCacheResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *SealWriteCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *SealWriteCacheResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "SealWriteCacheResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(SealWriteCacheResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse) GetBody() *SealWriteCacheResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse) SetBody(v *SealWriteCacheResponse_Body) {
+ x.Body = v
+}
+func (x *SealWriteCacheResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *SealWriteCacheResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *SealWriteCacheResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *SealWriteCacheResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *SealWriteCacheResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *SealWriteCacheResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *SealWriteCacheResponse_Body
+ f = new(SealWriteCacheResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DetachShardsRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DetachShardsRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DetachShardsRequest_Body)(nil)
+ _ json.Marshaler = (*DetachShardsRequest_Body)(nil)
+ _ json.Unmarshaler = (*DetachShardsRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3643,26 +14442,139 @@ func (x *DetachShardsRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DetachShardsRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedBytesMarshal(1, buf[offset:], x.Shard_ID)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DetachShardsRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DetachShardsRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DetachShardsRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DetachShardsRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ }
+ }
+ return nil
+}
+func (x *DetachShardsRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *DetachShardsRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DetachShardsRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DetachShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Base64Bytes(x.Shard_ID[i])
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DetachShardsRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DetachShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Bytes()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DetachShardsRequest struct {
+ Body *DetachShardsRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DetachShardsRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*DetachShardsRequest)(nil)
+ _ json.Marshaler = (*DetachShardsRequest)(nil)
+ _ json.Unmarshaler = (*DetachShardsRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3675,27 +14587,6 @@ func (x *DetachShardsRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DetachShardsRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3711,13 +14602,163 @@ func (x *DetachShardsRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DetachShardsRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *DetachShardsRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DetachShardsRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DetachShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DetachShardsRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DetachShardsRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DetachShardsRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DetachShardsRequest) GetBody() *DetachShardsRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DetachShardsRequest) SetBody(v *DetachShardsRequest_Body) {
+ x.Body = v
+}
+func (x *DetachShardsRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DetachShardsRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DetachShardsRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DetachShardsRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DetachShardsRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DetachShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DetachShardsRequest_Body
+ f = new(DetachShardsRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DetachShardsResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DetachShardsResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*DetachShardsResponse_Body)(nil)
+ _ json.Marshaler = (*DetachShardsResponse_Body)(nil)
+ _ json.Unmarshaler = (*DetachShardsResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3728,18 +14769,93 @@ func (x *DetachShardsResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DetachShardsResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DetachShardsResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *DetachShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DetachShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DetachShardsResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DetachShardsResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DetachShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DetachShardsResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DetachShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type DetachShardsResponse struct {
+ Body *DetachShardsResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*DetachShardsResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*DetachShardsResponse)(nil)
+ _ json.Marshaler = (*DetachShardsResponse)(nil)
+ _ json.Unmarshaler = (*DetachShardsResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -3752,27 +14868,6 @@ func (x *DetachShardsResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DetachShardsResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -3788,9 +14883,149 @@ func (x *DetachShardsResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *DetachShardsResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *DetachShardsResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *DetachShardsResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *DetachShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *DetachShardsResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "DetachShardsResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(DetachShardsResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *DetachShardsResponse) GetBody() *DetachShardsResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *DetachShardsResponse) SetBody(v *DetachShardsResponse_Body) {
+ x.Body = v
+}
+func (x *DetachShardsResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *DetachShardsResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *DetachShardsResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *DetachShardsResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *DetachShardsResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *DetachShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *DetachShardsResponse_Body
+ f = new(DetachShardsResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
diff --git a/pkg/services/control/service_test.go b/pkg/services/control/service_test.go
deleted file mode 100644
index 1d98cc6f1..000000000
--- a/pkg/services/control/service_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package control_test
-
-import (
- "bytes"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
-)
-
-func TestHealthCheckResponse_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- generateHealthCheckResponseBody(),
- new(control.HealthCheckResponse_Body),
- func(m1, m2 protoMessage) bool {
- return equalHealthCheckResponseBodies(
- m1.(*control.HealthCheckResponse_Body),
- m2.(*control.HealthCheckResponse_Body),
- )
- },
- )
-}
-
-func generateHealthCheckResponseBody() *control.HealthCheckResponse_Body {
- body := new(control.HealthCheckResponse_Body)
- body.SetNetmapStatus(control.NetmapStatus_ONLINE)
- body.SetHealthStatus(control.HealthStatus_SHUTTING_DOWN)
-
- return body
-}
-
-func equalHealthCheckResponseBodies(b1, b2 *control.HealthCheckResponse_Body) bool {
- return b1.GetNetmapStatus() == b2.GetNetmapStatus() &&
- b1.GetHealthStatus() == b2.GetHealthStatus()
-}
-
-func TestSetNetmapStatusRequest_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- generateSetNetmapStatusRequestBody(),
- new(control.SetNetmapStatusRequest_Body),
- func(m1, m2 protoMessage) bool {
- return equalSetnetmapStatusRequestBodies(
- m1.(*control.SetNetmapStatusRequest_Body),
- m2.(*control.SetNetmapStatusRequest_Body),
- )
- },
- )
-}
-
-func generateSetNetmapStatusRequestBody() *control.SetNetmapStatusRequest_Body {
- body := new(control.SetNetmapStatusRequest_Body)
- body.SetStatus(control.NetmapStatus_ONLINE)
-
- return body
-}
-
-func equalSetnetmapStatusRequestBodies(b1, b2 *control.SetNetmapStatusRequest_Body) bool {
- return b1.GetStatus() == b2.GetStatus()
-}
-
-func TestListShardsResponse_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- generateListShardsResponseBody(),
- new(control.ListShardsResponse_Body),
- func(m1, m2 protoMessage) bool {
- return equalListShardResponseBodies(
- m1.(*control.ListShardsResponse_Body),
- m2.(*control.ListShardsResponse_Body),
- )
- },
- )
-}
-
-func equalListShardResponseBodies(b1, b2 *control.ListShardsResponse_Body) bool {
- if len(b1.Shards) != len(b2.Shards) {
- return false
- }
-
- for i := range b1.Shards {
- if b1.Shards[i].GetMetabasePath() != b2.Shards[i].GetMetabasePath() ||
- b1.Shards[i].GetWritecachePath() != b2.Shards[i].GetWritecachePath() ||
- b1.Shards[i].GetPiloramaPath() != b2.Shards[i].GetPiloramaPath() ||
- !bytes.Equal(b1.Shards[i].GetShard_ID(), b2.Shards[i].GetShard_ID()) {
- return false
- }
-
- info1 := b1.Shards[i].GetBlobstor()
- info2 := b2.Shards[i].GetBlobstor()
- if !compareBlobstorInfo(info1, info2) {
- return false
- }
- }
-
- for i := range b1.Shards {
- for j := i + 1; j < len(b1.Shards); j++ {
- if b1.Shards[i].GetMetabasePath() == b2.Shards[j].GetMetabasePath() ||
- !compareBlobstorInfo(b1.Shards[i].Blobstor, b2.Shards[i].Blobstor) ||
- b1.Shards[i].GetWritecachePath() == b2.Shards[j].GetWritecachePath() ||
- bytes.Equal(b1.Shards[i].GetShard_ID(), b2.Shards[j].GetShard_ID()) {
- return false
- }
- }
- }
-
- return true
-}
-
-func compareBlobstorInfo(a, b []*control.BlobstorInfo) bool {
- if len(a) != len(b) {
- return false
- }
- for i := range a {
- if a[i].Type != b[i].Type ||
- a[i].Path != b[i].Path {
- return false
- }
- }
- return true
-}
-
-func generateListShardsResponseBody() *control.ListShardsResponse_Body {
- body := new(control.ListShardsResponse_Body)
- body.SetShards([]*control.ShardInfo{
- generateShardInfo(0),
- generateShardInfo(1),
- })
-
- return body
-}
-
-func TestSetShardModeRequest_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- generateSetShardModeRequestBody(),
- new(control.SetShardModeRequest_Body),
- func(m1, m2 protoMessage) bool {
- return equalSetShardModeRequestBodies(
- m1.(*control.SetShardModeRequest_Body),
- m2.(*control.SetShardModeRequest_Body),
- )
- },
- )
-}
-
-func generateSetShardModeRequestBody() *control.SetShardModeRequest_Body {
- body := new(control.SetShardModeRequest_Body)
- body.SetShardIDList([][]byte{{0, 1, 2, 3, 4}})
- body.SetMode(control.ShardMode_READ_WRITE)
-
- return body
-}
-
-func equalSetShardModeRequestBodies(b1, b2 *control.SetShardModeRequest_Body) bool {
- if b1.GetMode() != b2.GetMode() || len(b1.Shard_ID) != len(b2.Shard_ID) {
- return false
- }
-
- for i := range b1.Shard_ID {
- if !bytes.Equal(b1.Shard_ID[i], b2.Shard_ID[i]) {
- return false
- }
- }
-
- return true
-}
-
-func TestSynchronizeTreeRequest_Body_StableMarshal(t *testing.T) {
- testStableMarshal(t,
- &control.SynchronizeTreeRequest_Body{
- ContainerId: []byte{1, 2, 3, 4, 5, 6, 7},
- TreeId: "someID",
- Height: 42,
- },
- new(control.SynchronizeTreeRequest_Body),
- func(m1, m2 protoMessage) bool {
- b1 := m1.(*control.SynchronizeTreeRequest_Body)
- b2 := m2.(*control.SynchronizeTreeRequest_Body)
- return bytes.Equal(b1.GetContainerId(), b2.GetContainerId()) &&
- b1.GetTreeId() == b2.GetTreeId() &&
- b1.GetHeight() == b2.GetHeight()
- },
- )
-}
diff --git a/pkg/services/control/types.go b/pkg/services/control/types.go
deleted file mode 100644
index 94f681c55..000000000
--- a/pkg/services/control/types.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package control
-
-import (
- "google.golang.org/protobuf/encoding/protojson"
-)
-
-// SetKey sets public key used for signing.
-func (x *Signature) SetKey(v []byte) {
- if x != nil {
- x.Key = v
- }
-}
-
-// SetSign sets binary signature.
-func (x *Signature) SetSign(v []byte) {
- if x != nil {
- x.Sign = v
- }
-}
-
-// SetKey sets key of the node attribute.
-func (x *NodeInfo_Attribute) SetKey(v string) {
- if x != nil {
- x.Key = v
- }
-}
-
-// SetValue sets value of the node attribute.
-func (x *NodeInfo_Attribute) SetValue(v string) {
- if x != nil {
- x.Value = v
- }
-}
-
-// SetParents sets parent keys.
-func (x *NodeInfo_Attribute) SetParents(v []string) {
- if x != nil {
- x.Parents = v
- }
-}
-
-// SetPublicKey sets public key of the FrostFS node in a binary format.
-func (x *NodeInfo) SetPublicKey(v []byte) {
- if x != nil {
- x.PublicKey = v
- }
-}
-
-// SetAddresses sets ways to connect to a node.
-func (x *NodeInfo) SetAddresses(v []string) {
- if x != nil {
- x.Addresses = v
- }
-}
-
-// SetAttributes sets attributes of the FrostFS Storage Node.
-func (x *NodeInfo) SetAttributes(v []*NodeInfo_Attribute) {
- if x != nil {
- x.Attributes = v
- }
-}
-
-// SetState sets state of the FrostFS node.
-func (x *NodeInfo) SetState(v NetmapStatus) {
- if x != nil {
- x.State = v
- }
-}
-
-// SetEpoch sets revision number of the network map.
-func (x *Netmap) SetEpoch(v uint64) {
- if x != nil {
- x.Epoch = v
- }
-}
-
-// SetNodes sets nodes presented in network.
-func (x *Netmap) SetNodes(v []*NodeInfo) {
- if x != nil {
- x.Nodes = v
- }
-}
-
-func (x *Netmap) MarshalJSON() ([]byte, error) {
- return protojson.MarshalOptions{
- EmitUnpopulated: true,
- }.Marshal(x)
-}
-
-// SetID sets identificator of the shard.
-func (x *ShardInfo) SetID(v []byte) {
- x.Shard_ID = v
-}
-
-// SetMetabasePath sets path to shard's metabase.
-func (x *ShardInfo) SetMetabasePath(v string) {
- x.MetabasePath = v
-}
-
-// SetWriteCachePath sets path to shard's write-cache.
-func (x *ShardInfo) SetWriteCachePath(v string) {
- x.WritecachePath = v
-}
-
-// SetPiloramaPath sets path to shard's pilorama.
-func (x *ShardInfo) SetPiloramaPath(v string) {
- x.PiloramaPath = v
-}
-
-// SetMode sets path to shard's work mode.
-func (x *ShardInfo) SetMode(v ShardMode) {
- x.Mode = v
-}
-
-// SetErrorCount sets shard's error counter.
-func (x *ShardInfo) SetErrorCount(count uint32) {
- x.ErrorCount = count
-}
diff --git a/pkg/services/control/types.pb.go b/pkg/services/control/types.pb.go
deleted file mode 100644
index 858755694..000000000
--- a/pkg/services/control/types.pb.go
+++ /dev/null
@@ -1,1011 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.0
-// source: pkg/services/control/types.proto
-
-package control
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Status of the storage node in the FrostFS network map.
-type NetmapStatus int32
-
-const (
- // Undefined status, default value.
- NetmapStatus_STATUS_UNDEFINED NetmapStatus = 0
- // Node is online.
- NetmapStatus_ONLINE NetmapStatus = 1
- // Node is offline.
- NetmapStatus_OFFLINE NetmapStatus = 2
- // Node is maintained by the owner.
- NetmapStatus_MAINTENANCE NetmapStatus = 3
-)
-
-// Enum value maps for NetmapStatus.
-var (
- NetmapStatus_name = map[int32]string{
- 0: "STATUS_UNDEFINED",
- 1: "ONLINE",
- 2: "OFFLINE",
- 3: "MAINTENANCE",
- }
- NetmapStatus_value = map[string]int32{
- "STATUS_UNDEFINED": 0,
- "ONLINE": 1,
- "OFFLINE": 2,
- "MAINTENANCE": 3,
- }
-)
-
-func (x NetmapStatus) Enum() *NetmapStatus {
- p := new(NetmapStatus)
- *p = x
- return p
-}
-
-func (x NetmapStatus) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (NetmapStatus) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_control_types_proto_enumTypes[0].Descriptor()
-}
-
-func (NetmapStatus) Type() protoreflect.EnumType {
- return &file_pkg_services_control_types_proto_enumTypes[0]
-}
-
-func (x NetmapStatus) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use NetmapStatus.Descriptor instead.
-func (NetmapStatus) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{0}
-}
-
-// Health status of the storage node application.
-type HealthStatus int32
-
-const (
- // Undefined status, default value.
- HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
- // Storage node application is starting.
- HealthStatus_STARTING HealthStatus = 1
- // Storage node application is started and serves all services.
- HealthStatus_READY HealthStatus = 2
- // Storage node application is shutting down.
- HealthStatus_SHUTTING_DOWN HealthStatus = 3
- // Storage node application is reconfiguring.
- HealthStatus_RECONFIGURING HealthStatus = 4
-)
-
-// Enum value maps for HealthStatus.
-var (
- HealthStatus_name = map[int32]string{
- 0: "HEALTH_STATUS_UNDEFINED",
- 1: "STARTING",
- 2: "READY",
- 3: "SHUTTING_DOWN",
- 4: "RECONFIGURING",
- }
- HealthStatus_value = map[string]int32{
- "HEALTH_STATUS_UNDEFINED": 0,
- "STARTING": 1,
- "READY": 2,
- "SHUTTING_DOWN": 3,
- "RECONFIGURING": 4,
- }
-)
-
-func (x HealthStatus) Enum() *HealthStatus {
- p := new(HealthStatus)
- *p = x
- return p
-}
-
-func (x HealthStatus) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (HealthStatus) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_control_types_proto_enumTypes[1].Descriptor()
-}
-
-func (HealthStatus) Type() protoreflect.EnumType {
- return &file_pkg_services_control_types_proto_enumTypes[1]
-}
-
-func (x HealthStatus) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use HealthStatus.Descriptor instead.
-func (HealthStatus) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1}
-}
-
-// Work mode of the shard.
-type ShardMode int32
-
-const (
- // Undefined mode, default value.
- ShardMode_SHARD_MODE_UNDEFINED ShardMode = 0
- // Read-write.
- ShardMode_READ_WRITE ShardMode = 1
- // Read-only.
- ShardMode_READ_ONLY ShardMode = 2
- // Degraded.
- ShardMode_DEGRADED ShardMode = 3
- // DegradedReadOnly.
- ShardMode_DEGRADED_READ_ONLY ShardMode = 4
-)
-
-// Enum value maps for ShardMode.
-var (
- ShardMode_name = map[int32]string{
- 0: "SHARD_MODE_UNDEFINED",
- 1: "READ_WRITE",
- 2: "READ_ONLY",
- 3: "DEGRADED",
- 4: "DEGRADED_READ_ONLY",
- }
- ShardMode_value = map[string]int32{
- "SHARD_MODE_UNDEFINED": 0,
- "READ_WRITE": 1,
- "READ_ONLY": 2,
- "DEGRADED": 3,
- "DEGRADED_READ_ONLY": 4,
- }
-)
-
-func (x ShardMode) Enum() *ShardMode {
- p := new(ShardMode)
- *p = x
- return p
-}
-
-func (x ShardMode) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ShardMode) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_control_types_proto_enumTypes[2].Descriptor()
-}
-
-func (ShardMode) Type() protoreflect.EnumType {
- return &file_pkg_services_control_types_proto_enumTypes[2]
-}
-
-func (x ShardMode) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ShardMode.Descriptor instead.
-func (ShardMode) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{2}
-}
-
-type ChainTarget_TargetType int32
-
-const (
- ChainTarget_UNDEFINED ChainTarget_TargetType = 0
- ChainTarget_NAMESPACE ChainTarget_TargetType = 1
- ChainTarget_CONTAINER ChainTarget_TargetType = 2
- ChainTarget_USER ChainTarget_TargetType = 3
- ChainTarget_GROUP ChainTarget_TargetType = 4
-)
-
-// Enum value maps for ChainTarget_TargetType.
-var (
- ChainTarget_TargetType_name = map[int32]string{
- 0: "UNDEFINED",
- 1: "NAMESPACE",
- 2: "CONTAINER",
- 3: "USER",
- 4: "GROUP",
- }
- ChainTarget_TargetType_value = map[string]int32{
- "UNDEFINED": 0,
- "NAMESPACE": 1,
- "CONTAINER": 2,
- "USER": 3,
- "GROUP": 4,
- }
-)
-
-func (x ChainTarget_TargetType) Enum() *ChainTarget_TargetType {
- p := new(ChainTarget_TargetType)
- *p = x
- return p
-}
-
-func (x ChainTarget_TargetType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ChainTarget_TargetType) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_control_types_proto_enumTypes[3].Descriptor()
-}
-
-func (ChainTarget_TargetType) Type() protoreflect.EnumType {
- return &file_pkg_services_control_types_proto_enumTypes[3]
-}
-
-func (x ChainTarget_TargetType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ChainTarget_TargetType.Descriptor instead.
-func (ChainTarget_TargetType) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{5, 0}
-}
-
-// Signature of some message.
-type Signature struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Public key used for signing.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // Binary signature.
- Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"`
-}
-
-func (x *Signature) Reset() {
- *x = Signature{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Signature) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Signature) ProtoMessage() {}
-
-func (x *Signature) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
-func (*Signature) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Signature) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-
-func (x *Signature) GetSign() []byte {
- if x != nil {
- return x.Sign
- }
- return nil
-}
-
-// FrostFS node description.
-type NodeInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Public key of the FrostFS node in a binary format.
- PublicKey []byte `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
- // Ways to connect to a node.
- Addresses []string `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"`
- // Carries list of the FrostFS node attributes in a key-value form. Key name
- // must be a node-unique valid UTF-8 string. Value can't be empty. NodeInfo
- // structures with duplicated attribute names or attributes with empty values
- // will be considered invalid.
- Attributes []*NodeInfo_Attribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
- // Carries state of the FrostFS node.
- State NetmapStatus `protobuf:"varint,4,opt,name=state,proto3,enum=control.NetmapStatus" json:"state,omitempty"`
-}
-
-func (x *NodeInfo) Reset() {
- *x = NodeInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *NodeInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*NodeInfo) ProtoMessage() {}
-
-func (x *NodeInfo) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use NodeInfo.ProtoReflect.Descriptor instead.
-func (*NodeInfo) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *NodeInfo) GetPublicKey() []byte {
- if x != nil {
- return x.PublicKey
- }
- return nil
-}
-
-func (x *NodeInfo) GetAddresses() []string {
- if x != nil {
- return x.Addresses
- }
- return nil
-}
-
-func (x *NodeInfo) GetAttributes() []*NodeInfo_Attribute {
- if x != nil {
- return x.Attributes
- }
- return nil
-}
-
-func (x *NodeInfo) GetState() NetmapStatus {
- if x != nil {
- return x.State
- }
- return NetmapStatus_STATUS_UNDEFINED
-}
-
-// Network map structure.
-type Netmap struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Network map revision number.
- Epoch uint64 `protobuf:"varint,1,opt,name=epoch,proto3" json:"epoch,omitempty"`
- // Nodes presented in network.
- Nodes []*NodeInfo `protobuf:"bytes,2,rep,name=nodes,proto3" json:"nodes,omitempty"`
-}
-
-func (x *Netmap) Reset() {
- *x = Netmap{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Netmap) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Netmap) ProtoMessage() {}
-
-func (x *Netmap) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Netmap.ProtoReflect.Descriptor instead.
-func (*Netmap) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Netmap) GetEpoch() uint64 {
- if x != nil {
- return x.Epoch
- }
- return 0
-}
-
-func (x *Netmap) GetNodes() []*NodeInfo {
- if x != nil {
- return x.Nodes
- }
- return nil
-}
-
-// Shard description.
-type ShardInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Path to shard's metabase.
- MetabasePath string `protobuf:"bytes,2,opt,name=metabase_path,json=metabasePath,proto3" json:"metabase_path,omitempty"`
- // Shard's blobstor info.
- Blobstor []*BlobstorInfo `protobuf:"bytes,3,rep,name=blobstor,proto3" json:"blobstor,omitempty"`
- // Path to shard's write-cache, empty if disabled.
- WritecachePath string `protobuf:"bytes,4,opt,name=writecache_path,json=writecachePath,proto3" json:"writecache_path,omitempty"`
- // Work mode of the shard.
- Mode ShardMode `protobuf:"varint,5,opt,name=mode,proto3,enum=control.ShardMode" json:"mode,omitempty"`
- // Amount of errors occured.
- ErrorCount uint32 `protobuf:"varint,6,opt,name=errorCount,proto3" json:"errorCount,omitempty"`
- // Path to shard's pilorama storage.
- PiloramaPath string `protobuf:"bytes,7,opt,name=pilorama_path,json=piloramaPath,proto3" json:"pilorama_path,omitempty"`
-}
-
-func (x *ShardInfo) Reset() {
- *x = ShardInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ShardInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ShardInfo) ProtoMessage() {}
-
-func (x *ShardInfo) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ShardInfo.ProtoReflect.Descriptor instead.
-func (*ShardInfo) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *ShardInfo) GetShard_ID() []byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *ShardInfo) GetMetabasePath() string {
- if x != nil {
- return x.MetabasePath
- }
- return ""
-}
-
-func (x *ShardInfo) GetBlobstor() []*BlobstorInfo {
- if x != nil {
- return x.Blobstor
- }
- return nil
-}
-
-func (x *ShardInfo) GetWritecachePath() string {
- if x != nil {
- return x.WritecachePath
- }
- return ""
-}
-
-func (x *ShardInfo) GetMode() ShardMode {
- if x != nil {
- return x.Mode
- }
- return ShardMode_SHARD_MODE_UNDEFINED
-}
-
-func (x *ShardInfo) GetErrorCount() uint32 {
- if x != nil {
- return x.ErrorCount
- }
- return 0
-}
-
-func (x *ShardInfo) GetPiloramaPath() string {
- if x != nil {
- return x.PiloramaPath
- }
- return ""
-}
-
-// Blobstor component description.
-type BlobstorInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Path to the root.
- Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
- // Component type.
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
-}
-
-func (x *BlobstorInfo) Reset() {
- *x = BlobstorInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BlobstorInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BlobstorInfo) ProtoMessage() {}
-
-func (x *BlobstorInfo) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BlobstorInfo.ProtoReflect.Descriptor instead.
-func (*BlobstorInfo) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *BlobstorInfo) GetPath() string {
- if x != nil {
- return x.Path
- }
- return ""
-}
-
-func (x *BlobstorInfo) GetType() string {
- if x != nil {
- return x.Type
- }
- return ""
-}
-
-// ChainTarget is an object to which local overrides
-// are applied.
-type ChainTarget struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Type ChainTarget_TargetType `protobuf:"varint,1,opt,name=type,proto3,enum=control.ChainTarget_TargetType" json:"type,omitempty"`
- Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"`
-}
-
-func (x *ChainTarget) Reset() {
- *x = ChainTarget{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ChainTarget) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ChainTarget) ProtoMessage() {}
-
-func (x *ChainTarget) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ChainTarget.ProtoReflect.Descriptor instead.
-func (*ChainTarget) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *ChainTarget) GetType() ChainTarget_TargetType {
- if x != nil {
- return x.Type
- }
- return ChainTarget_UNDEFINED
-}
-
-func (x *ChainTarget) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// Administrator-defined Attributes of the FrostFS Storage Node.
-//
-// `Attribute` is a Key-Value metadata pair. Key name must be a valid UTF-8
-// string. Value can't be empty.
-//
-// Node's attributes are mostly used during Storage Policy evaluation to
-// calculate object's placement and find a set of nodes satisfying policy
-// requirements. There are some "well-known" node attributes common to all the
-// Storage Nodes in the network and used implicitly with default values if not
-// explicitly set:
-//
-// - Capacity \
-// Total available disk space in Gigabytes.
-// - Price \
-// Price in GAS tokens for storing one GB of data during one Epoch. In node
-// attributes it's a string presenting floating point number with comma or
-// point delimiter for decimal part. In the Network Map it will be saved as
-// 64-bit unsigned integer representing number of minimal token fractions.
-// - Locode \
-// Node's geographic location in
-// [UN/LOCODE](https://www.unece.org/cefact/codesfortrade/codes_index.html)
-// format approximated to the nearest point defined in standard.
-// - Country \
-// Country code in
-// [ISO 3166-1_alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)
-// format. Calculated automatically from `Locode` attribute
-// - Region \
-// Country's administative subdivision where node is located. Calculated
-// automatically from `Locode` attribute based on `SubDiv` field. Presented
-// in [ISO 3166-2](https://en.wikipedia.org/wiki/ISO_3166-2) format.
-// - City \
-// City, town, village or rural area name where node is located written
-// without diacritics . Calculated automatically from `Locode` attribute.
-//
-// For detailed description of each well-known attribute please see the
-// corresponding section in FrostFS Technical specification.
-type NodeInfo_Attribute struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Key of the node attribute.
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // Value of the node attribute.
- Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- // Parent keys, if any. For example for `City` it could be `Region` and
- // `Country`.
- Parents []string `protobuf:"bytes,3,rep,name=parents,proto3" json:"parents,omitempty"`
-}
-
-func (x *NodeInfo_Attribute) Reset() {
- *x = NodeInfo_Attribute{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_types_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *NodeInfo_Attribute) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*NodeInfo_Attribute) ProtoMessage() {}
-
-func (x *NodeInfo_Attribute) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_types_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use NodeInfo_Attribute.ProtoReflect.Descriptor instead.
-func (*NodeInfo_Attribute) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_types_proto_rawDescGZIP(), []int{1, 0}
-}
-
-func (x *NodeInfo_Attribute) GetKey() string {
- if x != nil {
- return x.Key
- }
- return ""
-}
-
-func (x *NodeInfo_Attribute) GetValue() string {
- if x != nil {
- return x.Value
- }
- return ""
-}
-
-func (x *NodeInfo_Attribute) GetParents() []string {
- if x != nil {
- return x.Parents
- }
- return nil
-}
-
-var File_pkg_services_control_types_proto protoreflect.FileDescriptor
-
-var file_pkg_services_control_types_proto_rawDesc = []byte{
- 0x0a, 0x20, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x22, 0x36, 0x0a, 0x09, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69,
- 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x22, 0x80, 0x02, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f,
- 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12,
- 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x3b, 0x0a,
- 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4e, 0x6f, 0x64, 0x65,
- 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a,
- 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x05, 0x73, 0x74,
- 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x4d, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69,
- 0x62, 0x75, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x47, 0x0a, 0x06, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
- 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
- 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x27, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22,
- 0x94, 0x02, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a,
- 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x31, 0x0a,
- 0x08, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x74,
- 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x74, 0x6f, 0x72,
- 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x70,
- 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x63, 0x61, 0x63, 0x68, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x6f, 0x64,
- 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64,
- 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x75, 0x6e,
- 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x69, 0x6c, 0x6f, 0x72, 0x61, 0x6d, 0x61, 0x5f, 0x70, 0x61,
- 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x69, 0x6c, 0x6f, 0x72, 0x61,
- 0x6d, 0x61, 0x50, 0x61, 0x74, 0x68, 0x22, 0x36, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x74,
- 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79,
- 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa6,
- 0x01, 0x0a, 0x0b, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x33,
- 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
- 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4e, 0x0a, 0x0a, 0x54, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e,
- 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x50, 0x41, 0x43,
- 0x45, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52,
- 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x55, 0x53, 0x45, 0x52, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05,
- 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x04, 0x2a, 0x4e, 0x0a, 0x0c, 0x4e, 0x65, 0x74, 0x6d, 0x61,
- 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55,
- 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a,
- 0x06, 0x4f, 0x4e, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x46, 0x46,
- 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45,
- 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x2a, 0x6a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x17, 0x48, 0x45, 0x41, 0x4c, 0x54,
- 0x48, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e,
- 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47,
- 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x12, 0x11, 0x0a,
- 0x0d, 0x53, 0x48, 0x55, 0x54, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x03,
- 0x12, 0x11, 0x0a, 0x0d, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e,
- 0x47, 0x10, 0x04, 0x2a, 0x6a, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65,
- 0x12, 0x18, 0x0a, 0x14, 0x53, 0x48, 0x41, 0x52, 0x44, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55,
- 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45,
- 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45,
- 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47,
- 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, 0x47, 0x52, 0x41,
- 0x44, 0x45, 0x44, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x42,
- 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69,
- 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62,
- 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b,
- 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_pkg_services_control_types_proto_rawDescOnce sync.Once
- file_pkg_services_control_types_proto_rawDescData = file_pkg_services_control_types_proto_rawDesc
-)
-
-func file_pkg_services_control_types_proto_rawDescGZIP() []byte {
- file_pkg_services_control_types_proto_rawDescOnce.Do(func() {
- file_pkg_services_control_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_control_types_proto_rawDescData)
- })
- return file_pkg_services_control_types_proto_rawDescData
-}
-
-var file_pkg_services_control_types_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
-var file_pkg_services_control_types_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
-var file_pkg_services_control_types_proto_goTypes = []interface{}{
- (NetmapStatus)(0), // 0: control.NetmapStatus
- (HealthStatus)(0), // 1: control.HealthStatus
- (ShardMode)(0), // 2: control.ShardMode
- (ChainTarget_TargetType)(0), // 3: control.ChainTarget.TargetType
- (*Signature)(nil), // 4: control.Signature
- (*NodeInfo)(nil), // 5: control.NodeInfo
- (*Netmap)(nil), // 6: control.Netmap
- (*ShardInfo)(nil), // 7: control.ShardInfo
- (*BlobstorInfo)(nil), // 8: control.BlobstorInfo
- (*ChainTarget)(nil), // 9: control.ChainTarget
- (*NodeInfo_Attribute)(nil), // 10: control.NodeInfo.Attribute
-}
-var file_pkg_services_control_types_proto_depIdxs = []int32{
- 10, // 0: control.NodeInfo.attributes:type_name -> control.NodeInfo.Attribute
- 0, // 1: control.NodeInfo.state:type_name -> control.NetmapStatus
- 5, // 2: control.Netmap.nodes:type_name -> control.NodeInfo
- 8, // 3: control.ShardInfo.blobstor:type_name -> control.BlobstorInfo
- 2, // 4: control.ShardInfo.mode:type_name -> control.ShardMode
- 3, // 5: control.ChainTarget.type:type_name -> control.ChainTarget.TargetType
- 6, // [6:6] is the sub-list for method output_type
- 6, // [6:6] is the sub-list for method input_type
- 6, // [6:6] is the sub-list for extension type_name
- 6, // [6:6] is the sub-list for extension extendee
- 0, // [0:6] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_control_types_proto_init() }
-func file_pkg_services_control_types_proto_init() {
- if File_pkg_services_control_types_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_control_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Signature); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NodeInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Netmap); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShardInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BlobstorInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ChainTarget); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NodeInfo_Attribute); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_control_types_proto_rawDesc,
- NumEnums: 4,
- NumMessages: 7,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_pkg_services_control_types_proto_goTypes,
- DependencyIndexes: file_pkg_services_control_types_proto_depIdxs,
- EnumInfos: file_pkg_services_control_types_proto_enumTypes,
- MessageInfos: file_pkg_services_control_types_proto_msgTypes,
- }.Build()
- File_pkg_services_control_types_proto = out.File
- file_pkg_services_control_types_proto_rawDesc = nil
- file_pkg_services_control_types_proto_goTypes = nil
- file_pkg_services_control_types_proto_depIdxs = nil
-}
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go
index 858d85a1c..9aff26a98 100644
--- a/pkg/services/control/types_frostfs.pb.go
+++ b/pkg/services/control/types_frostfs.pb.go
@@ -2,7 +2,149 @@
package control
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+import (
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+ strconv "strconv"
+)
+
+type NetmapStatus int32
+
+const (
+ NetmapStatus_STATUS_UNDEFINED NetmapStatus = 0
+ NetmapStatus_ONLINE NetmapStatus = 1
+ NetmapStatus_OFFLINE NetmapStatus = 2
+ NetmapStatus_MAINTENANCE NetmapStatus = 3
+)
+
+var (
+ NetmapStatus_name = map[int32]string{
+ 0: "STATUS_UNDEFINED",
+ 1: "ONLINE",
+ 2: "OFFLINE",
+ 3: "MAINTENANCE",
+ }
+ NetmapStatus_value = map[string]int32{
+ "STATUS_UNDEFINED": 0,
+ "ONLINE": 1,
+ "OFFLINE": 2,
+ "MAINTENANCE": 3,
+ }
+)
+
+func (x NetmapStatus) String() string {
+ if v, ok := NetmapStatus_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *NetmapStatus) FromString(s string) bool {
+ if v, ok := NetmapStatus_value[s]; ok {
+ *x = NetmapStatus(v)
+ return true
+ }
+ return false
+}
+
+type HealthStatus int32
+
+const (
+ HealthStatus_HEALTH_STATUS_UNDEFINED HealthStatus = 0
+ HealthStatus_STARTING HealthStatus = 1
+ HealthStatus_READY HealthStatus = 2
+ HealthStatus_SHUTTING_DOWN HealthStatus = 3
+ HealthStatus_RECONFIGURING HealthStatus = 4
+)
+
+var (
+ HealthStatus_name = map[int32]string{
+ 0: "HEALTH_STATUS_UNDEFINED",
+ 1: "STARTING",
+ 2: "READY",
+ 3: "SHUTTING_DOWN",
+ 4: "RECONFIGURING",
+ }
+ HealthStatus_value = map[string]int32{
+ "HEALTH_STATUS_UNDEFINED": 0,
+ "STARTING": 1,
+ "READY": 2,
+ "SHUTTING_DOWN": 3,
+ "RECONFIGURING": 4,
+ }
+)
+
+func (x HealthStatus) String() string {
+ if v, ok := HealthStatus_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *HealthStatus) FromString(s string) bool {
+ if v, ok := HealthStatus_value[s]; ok {
+ *x = HealthStatus(v)
+ return true
+ }
+ return false
+}
+
+type ShardMode int32
+
+const (
+ ShardMode_SHARD_MODE_UNDEFINED ShardMode = 0
+ ShardMode_READ_WRITE ShardMode = 1
+ ShardMode_READ_ONLY ShardMode = 2
+ ShardMode_DEGRADED ShardMode = 3
+ ShardMode_DEGRADED_READ_ONLY ShardMode = 4
+)
+
+var (
+ ShardMode_name = map[int32]string{
+ 0: "SHARD_MODE_UNDEFINED",
+ 1: "READ_WRITE",
+ 2: "READ_ONLY",
+ 3: "DEGRADED",
+ 4: "DEGRADED_READ_ONLY",
+ }
+ ShardMode_value = map[string]int32{
+ "SHARD_MODE_UNDEFINED": 0,
+ "READ_WRITE": 1,
+ "READ_ONLY": 2,
+ "DEGRADED": 3,
+ "DEGRADED_READ_ONLY": 4,
+ }
+)
+
+func (x ShardMode) String() string {
+ if v, ok := ShardMode_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *ShardMode) FromString(s string) bool {
+ if v, ok := ShardMode_value[s]; ok {
+ *x = ShardMode(v)
+ return true
+ }
+ return false
+}
+
+type Signature struct {
+ Key []byte `json:"key"`
+ Sign []byte `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*Signature)(nil)
+ _ encoding.ProtoUnmarshaler = (*Signature)(nil)
+ _ json.Marshaler = (*Signature)(nil)
+ _ json.Unmarshaler = (*Signature)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
@@ -16,27 +158,155 @@ func (x *Signature) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *Signature) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Key)
- offset += proto.BytesMarshal(2, buf[offset:], x.Sign)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *Signature) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendBytes(1, x.Key)
+ }
+ if len(x.Sign) != 0 {
+ mm.AppendBytes(2, x.Sign)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *Signature) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "Signature")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Sign
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Sign")
+ }
+ x.Sign = data
+ }
+ }
+ return nil
+}
+func (x *Signature) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+func (x *Signature) SetKey(v []byte) {
+ x.Key = v
+}
+func (x *Signature) GetSign() []byte {
+ if x != nil {
+ return x.Sign
+ }
+ return nil
+}
+func (x *Signature) SetSign(v []byte) {
+ x.Sign = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *Signature) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Key)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.Sign)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *Signature) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Key = f
+ }
+ case "signature":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Sign = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type NodeInfo_Attribute struct {
+ Key string `json:"key"`
+ Value string `json:"value"`
+ Parents []string `json:"parents"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*NodeInfo_Attribute)(nil)
+ _ encoding.ProtoUnmarshaler = (*NodeInfo_Attribute)(nil)
+ _ json.Marshaler = (*NodeInfo_Attribute)(nil)
+ _ json.Unmarshaler = (*NodeInfo_Attribute)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -50,28 +320,199 @@ func (x *NodeInfo_Attribute) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *NodeInfo_Attribute) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.StringMarshal(1, buf[offset:], x.Key)
- offset += proto.StringMarshal(2, buf[offset:], x.Value)
- offset += proto.RepeatedStringMarshal(3, buf[offset:], x.Parents)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *NodeInfo_Attribute) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *NodeInfo_Attribute) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendString(1, x.Key)
+ }
+ if len(x.Value) != 0 {
+ mm.AppendString(2, x.Value)
+ }
+ for j := range x.Parents {
+ mm.AppendString(3, x.Parents[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *NodeInfo_Attribute) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "NodeInfo_Attribute")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Value
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Value")
+ }
+ x.Value = data
+ case 3: // Parents
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Parents")
+ }
+ x.Parents = append(x.Parents, data)
+ }
+ }
+ return nil
+}
+func (x *NodeInfo_Attribute) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+func (x *NodeInfo_Attribute) SetKey(v string) {
+ x.Key = v
+}
+func (x *NodeInfo_Attribute) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+func (x *NodeInfo_Attribute) SetValue(v string) {
+ x.Value = v
+}
+func (x *NodeInfo_Attribute) GetParents() []string {
+ if x != nil {
+ return x.Parents
+ }
+ return nil
+}
+func (x *NodeInfo_Attribute) SetParents(v []string) {
+ x.Parents = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *NodeInfo_Attribute) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *NodeInfo_Attribute) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
+ out.String(x.Key)
+ }
+ {
+ const prefix string = ",\"value\":"
+ out.RawString(prefix)
+ out.String(x.Value)
+ }
+ {
+ const prefix string = ",\"parents\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Parents {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Parents[i])
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *NodeInfo_Attribute) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *NodeInfo_Attribute) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f string
+ f = in.String()
+ x.Key = f
+ }
+ case "value":
+ {
+ var f string
+ f = in.String()
+ x.Value = f
+ }
+ case "parents":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Parents = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type NodeInfo struct {
+ PublicKey []byte `json:"publicKey"`
+ Addresses []string `json:"addresses"`
+ Attributes []*NodeInfo_Attribute `json:"attributes"`
+ State NetmapStatus `json:"state"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*NodeInfo)(nil)
+ _ encoding.ProtoUnmarshaler = (*NodeInfo)(nil)
+ _ json.Marshaler = (*NodeInfo)(nil)
+ _ json.Unmarshaler = (*NodeInfo)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -88,31 +529,263 @@ func (x *NodeInfo) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *NodeInfo) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.PublicKey)
- offset += proto.RepeatedStringMarshal(2, buf[offset:], x.Addresses)
- for i := range x.Attributes {
- offset += proto.NestedStructureMarshal(3, buf[offset:], x.Attributes[i])
- }
- offset += proto.EnumMarshal(4, buf[offset:], int32(x.State))
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *NodeInfo) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *NodeInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.PublicKey) != 0 {
+ mm.AppendBytes(1, x.PublicKey)
+ }
+ for j := range x.Addresses {
+ mm.AppendString(2, x.Addresses[j])
+ }
+ for i := range x.Attributes {
+ if x.Attributes[i] != nil && x.Attributes[i].StableSize() != 0 {
+ x.Attributes[i].EmitProtobuf(mm.AppendMessage(3))
+ }
+ }
+ if int32(x.State) != 0 {
+ mm.AppendInt32(4, int32(x.State))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *NodeInfo) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "NodeInfo")
+ }
+ switch fc.FieldNum {
+ case 1: // PublicKey
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "PublicKey")
+ }
+ x.PublicKey = data
+ case 2: // Addresses
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Addresses")
+ }
+ x.Addresses = append(x.Addresses, data)
+ case 3: // Attributes
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Attributes")
+ }
+ x.Attributes = append(x.Attributes, new(NodeInfo_Attribute))
+ ff := x.Attributes[len(x.Attributes)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 4: // State
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "State")
+ }
+ x.State = NetmapStatus(data)
+ }
+ }
+ return nil
+}
+func (x *NodeInfo) GetPublicKey() []byte {
+ if x != nil {
+ return x.PublicKey
+ }
+ return nil
+}
+func (x *NodeInfo) SetPublicKey(v []byte) {
+ x.PublicKey = v
+}
+func (x *NodeInfo) GetAddresses() []string {
+ if x != nil {
+ return x.Addresses
+ }
+ return nil
+}
+func (x *NodeInfo) SetAddresses(v []string) {
+ x.Addresses = v
+}
+func (x *NodeInfo) GetAttributes() []*NodeInfo_Attribute {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+func (x *NodeInfo) SetAttributes(v []*NodeInfo_Attribute) {
+ x.Attributes = v
+}
+func (x *NodeInfo) GetState() NetmapStatus {
+ if x != nil {
+ return x.State
+ }
+ return 0
+}
+func (x *NodeInfo) SetState(v NetmapStatus) {
+ x.State = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *NodeInfo) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"publicKey\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.PublicKey)
+ }
+ {
+ const prefix string = ",\"addresses\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Addresses {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Addresses[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"attributes\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Attributes {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Attributes[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"state\":"
+ out.RawString(prefix)
+ out.Int32(int32(x.State))
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *NodeInfo) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "publicKey":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.PublicKey = f
+ }
+ case "addresses":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Addresses = list
+ in.Delim(']')
+ }
+ case "attributes":
+ {
+ var f *NodeInfo_Attribute
+ var list []*NodeInfo_Attribute
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(NodeInfo_Attribute)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Attributes = list
+ in.Delim(']')
+ }
+ case "state":
+ {
+ var f NetmapStatus
+ var parsedValue NetmapStatus
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := NetmapStatus_value[v]; ok {
+ parsedValue = NetmapStatus(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = NetmapStatus(vv)
+ case float64:
+ parsedValue = NetmapStatus(v)
+ }
+ f = parsedValue
+ x.State = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type Netmap struct {
+ Epoch uint64 `json:"epoch"`
+ Nodes []*NodeInfo `json:"nodes"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*Netmap)(nil)
+ _ encoding.ProtoUnmarshaler = (*Netmap)(nil)
+ _ json.Marshaler = (*Netmap)(nil)
+ _ json.Unmarshaler = (*Netmap)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -127,29 +800,180 @@ func (x *Netmap) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *Netmap) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt64Marshal(1, buf[offset:], x.Epoch)
- for i := range x.Nodes {
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Nodes[i])
- }
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *Netmap) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *Netmap) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Epoch != 0 {
+ mm.AppendUint64(1, x.Epoch)
+ }
+ for i := range x.Nodes {
+ if x.Nodes[i] != nil && x.Nodes[i].StableSize() != 0 {
+ x.Nodes[i].EmitProtobuf(mm.AppendMessage(2))
+ }
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *Netmap) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "Netmap")
+ }
+ switch fc.FieldNum {
+ case 1: // Epoch
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Epoch")
+ }
+ x.Epoch = data
+ case 2: // Nodes
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Nodes")
+ }
+ x.Nodes = append(x.Nodes, new(NodeInfo))
+ ff := x.Nodes[len(x.Nodes)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *Netmap) GetEpoch() uint64 {
+ if x != nil {
+ return x.Epoch
+ }
+ return 0
+}
+func (x *Netmap) SetEpoch(v uint64) {
+ x.Epoch = v
+}
+func (x *Netmap) GetNodes() []*NodeInfo {
+ if x != nil {
+ return x.Nodes
+ }
+ return nil
+}
+func (x *Netmap) SetNodes(v []*NodeInfo) {
+ x.Nodes = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *Netmap) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *Netmap) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"epoch\":"
+ out.RawString(prefix[1:])
+ out.Uint64(x.Epoch)
+ }
+ {
+ const prefix string = ",\"nodes\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Nodes {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Nodes[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *Netmap) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "epoch":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.Epoch = f
+ }
+ case "nodes":
+ {
+ var f *NodeInfo
+ var list []*NodeInfo
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(NodeInfo)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Nodes = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ShardInfo struct {
+ Shard_ID []byte `json:"shardID"`
+ MetabasePath string `json:"metabasePath"`
+ Blobstor []*BlobstorInfo `json:"blobstor"`
+ WritecachePath string `json:"writecachePath"`
+ Mode ShardMode `json:"mode"`
+ ErrorCount uint32 `json:"errorCount"`
+ PiloramaPath string `json:"piloramaPath"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ShardInfo)(nil)
+ _ encoding.ProtoUnmarshaler = (*ShardInfo)(nil)
+ _ json.Marshaler = (*ShardInfo)(nil)
+ _ json.Unmarshaler = (*ShardInfo)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -169,34 +993,336 @@ func (x *ShardInfo) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ShardInfo) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.StringMarshal(2, buf[offset:], x.MetabasePath)
- for i := range x.Blobstor {
- offset += proto.NestedStructureMarshal(3, buf[offset:], x.Blobstor[i])
- }
- offset += proto.StringMarshal(4, buf[offset:], x.WritecachePath)
- offset += proto.EnumMarshal(5, buf[offset:], int32(x.Mode))
- offset += proto.UInt32Marshal(6, buf[offset:], x.ErrorCount)
- offset += proto.StringMarshal(7, buf[offset:], x.PiloramaPath)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ShardInfo) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ShardInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Shard_ID) != 0 {
+ mm.AppendBytes(1, x.Shard_ID)
+ }
+ if len(x.MetabasePath) != 0 {
+ mm.AppendString(2, x.MetabasePath)
+ }
+ for i := range x.Blobstor {
+ if x.Blobstor[i] != nil && x.Blobstor[i].StableSize() != 0 {
+ x.Blobstor[i].EmitProtobuf(mm.AppendMessage(3))
+ }
+ }
+ if len(x.WritecachePath) != 0 {
+ mm.AppendString(4, x.WritecachePath)
+ }
+ if int32(x.Mode) != 0 {
+ mm.AppendInt32(5, int32(x.Mode))
+ }
+ if x.ErrorCount != 0 {
+ mm.AppendUint32(6, x.ErrorCount)
+ }
+ if len(x.PiloramaPath) != 0 {
+ mm.AppendString(7, x.PiloramaPath)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ShardInfo) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ShardInfo")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = data
+ case 2: // MetabasePath
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "MetabasePath")
+ }
+ x.MetabasePath = data
+ case 3: // Blobstor
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Blobstor")
+ }
+ x.Blobstor = append(x.Blobstor, new(BlobstorInfo))
+ ff := x.Blobstor[len(x.Blobstor)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 4: // WritecachePath
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "WritecachePath")
+ }
+ x.WritecachePath = data
+ case 5: // Mode
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Mode")
+ }
+ x.Mode = ShardMode(data)
+ case 6: // ErrorCount
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ErrorCount")
+ }
+ x.ErrorCount = data
+ case 7: // PiloramaPath
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "PiloramaPath")
+ }
+ x.PiloramaPath = data
+ }
+ }
+ return nil
+}
+func (x *ShardInfo) GetShard_ID() []byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *ShardInfo) SetShard_ID(v []byte) {
+ x.Shard_ID = v
+}
+func (x *ShardInfo) GetMetabasePath() string {
+ if x != nil {
+ return x.MetabasePath
+ }
+ return ""
+}
+func (x *ShardInfo) SetMetabasePath(v string) {
+ x.MetabasePath = v
+}
+func (x *ShardInfo) GetBlobstor() []*BlobstorInfo {
+ if x != nil {
+ return x.Blobstor
+ }
+ return nil
+}
+func (x *ShardInfo) SetBlobstor(v []*BlobstorInfo) {
+ x.Blobstor = v
+}
+func (x *ShardInfo) GetWritecachePath() string {
+ if x != nil {
+ return x.WritecachePath
+ }
+ return ""
+}
+func (x *ShardInfo) SetWritecachePath(v string) {
+ x.WritecachePath = v
+}
+func (x *ShardInfo) GetMode() ShardMode {
+ if x != nil {
+ return x.Mode
+ }
+ return 0
+}
+func (x *ShardInfo) SetMode(v ShardMode) {
+ x.Mode = v
+}
+func (x *ShardInfo) GetErrorCount() uint32 {
+ if x != nil {
+ return x.ErrorCount
+ }
+ return 0
+}
+func (x *ShardInfo) SetErrorCount(v uint32) {
+ x.ErrorCount = v
+}
+func (x *ShardInfo) GetPiloramaPath() string {
+ if x != nil {
+ return x.PiloramaPath
+ }
+ return ""
+}
+func (x *ShardInfo) SetPiloramaPath(v string) {
+ x.PiloramaPath = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ShardInfo) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Shard_ID)
+ }
+ {
+ const prefix string = ",\"metabasePath\":"
+ out.RawString(prefix)
+ out.String(x.MetabasePath)
+ }
+ {
+ const prefix string = ",\"blobstor\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Blobstor {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Blobstor[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"writecachePath\":"
+ out.RawString(prefix)
+ out.String(x.WritecachePath)
+ }
+ {
+ const prefix string = ",\"mode\":"
+ out.RawString(prefix)
+ out.Int32(int32(x.Mode))
+ }
+ {
+ const prefix string = ",\"errorCount\":"
+ out.RawString(prefix)
+ out.Uint32(x.ErrorCount)
+ }
+ {
+ const prefix string = ",\"piloramaPath\":"
+ out.RawString(prefix)
+ out.String(x.PiloramaPath)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ShardInfo) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Shard_ID = f
+ }
+ case "metabasePath":
+ {
+ var f string
+ f = in.String()
+ x.MetabasePath = f
+ }
+ case "blobstor":
+ {
+ var f *BlobstorInfo
+ var list []*BlobstorInfo
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(BlobstorInfo)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Blobstor = list
+ in.Delim(']')
+ }
+ case "writecachePath":
+ {
+ var f string
+ f = in.String()
+ x.WritecachePath = f
+ }
+ case "mode":
+ {
+ var f ShardMode
+ var parsedValue ShardMode
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := ShardMode_value[v]; ok {
+ parsedValue = ShardMode(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = ShardMode(vv)
+ case float64:
+ parsedValue = ShardMode(v)
+ }
+ f = parsedValue
+ x.Mode = f
+ }
+ case "errorCount":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.ErrorCount = f
+ }
+ case "piloramaPath":
+ {
+ var f string
+ f = in.String()
+ x.PiloramaPath = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type BlobstorInfo struct {
+ Path string `json:"path"`
+ Type string `json:"type"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*BlobstorInfo)(nil)
+ _ encoding.ProtoUnmarshaler = (*BlobstorInfo)(nil)
+ _ json.Marshaler = (*BlobstorInfo)(nil)
+ _ json.Unmarshaler = (*BlobstorInfo)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -209,27 +1335,195 @@ func (x *BlobstorInfo) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *BlobstorInfo) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.StringMarshal(1, buf[offset:], x.Path)
- offset += proto.StringMarshal(2, buf[offset:], x.Type)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *BlobstorInfo) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *BlobstorInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Path) != 0 {
+ mm.AppendString(1, x.Path)
+ }
+ if len(x.Type) != 0 {
+ mm.AppendString(2, x.Type)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *BlobstorInfo) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "BlobstorInfo")
+ }
+ switch fc.FieldNum {
+ case 1: // Path
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Path")
+ }
+ x.Path = data
+ case 2: // Type
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Type")
+ }
+ x.Type = data
+ }
+ }
+ return nil
+}
+func (x *BlobstorInfo) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+func (x *BlobstorInfo) SetPath(v string) {
+ x.Path = v
+}
+func (x *BlobstorInfo) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+func (x *BlobstorInfo) SetType(v string) {
+ x.Type = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *BlobstorInfo) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *BlobstorInfo) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"path\":"
+ out.RawString(prefix[1:])
+ out.String(x.Path)
+ }
+ {
+ const prefix string = ",\"type\":"
+ out.RawString(prefix)
+ out.String(x.Type)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *BlobstorInfo) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *BlobstorInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "path":
+ {
+ var f string
+ f = in.String()
+ x.Path = f
+ }
+ case "type":
+ {
+ var f string
+ f = in.String()
+ x.Type = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ChainTarget_TargetType int32
+
+const (
+ ChainTarget_UNDEFINED ChainTarget_TargetType = 0
+ ChainTarget_NAMESPACE ChainTarget_TargetType = 1
+ ChainTarget_CONTAINER ChainTarget_TargetType = 2
+ ChainTarget_USER ChainTarget_TargetType = 3
+ ChainTarget_GROUP ChainTarget_TargetType = 4
+)
+
+var (
+ ChainTarget_TargetType_name = map[int32]string{
+ 0: "UNDEFINED",
+ 1: "NAMESPACE",
+ 2: "CONTAINER",
+ 3: "USER",
+ 4: "GROUP",
+ }
+ ChainTarget_TargetType_value = map[string]int32{
+ "UNDEFINED": 0,
+ "NAMESPACE": 1,
+ "CONTAINER": 2,
+ "USER": 3,
+ "GROUP": 4,
+ }
+)
+
+func (x ChainTarget_TargetType) String() string {
+ if v, ok := ChainTarget_TargetType_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *ChainTarget_TargetType) FromString(s string) bool {
+ if v, ok := ChainTarget_TargetType_value[s]; ok {
+ *x = ChainTarget_TargetType(v)
+ return true
+ }
+ return false
+}
+
+type ChainTarget struct {
+ Type ChainTarget_TargetType `json:"type"`
+ Name string `json:"Name"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ChainTarget)(nil)
+ _ encoding.ProtoUnmarshaler = (*ChainTarget)(nil)
+ _ json.Marshaler = (*ChainTarget)(nil)
+ _ json.Unmarshaler = (*ChainTarget)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -242,23 +1536,154 @@ func (x *ChainTarget) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ChainTarget) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.EnumMarshal(1, buf[offset:], int32(x.Type))
- offset += proto.StringMarshal(2, buf[offset:], x.Name)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ChainTarget) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *ChainTarget) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.Type) != 0 {
+ mm.AppendInt32(1, int32(x.Type))
+ }
+ if len(x.Name) != 0 {
+ mm.AppendString(2, x.Name)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ChainTarget) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ChainTarget")
+ }
+ switch fc.FieldNum {
+ case 1: // Type
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Type")
+ }
+ x.Type = ChainTarget_TargetType(data)
+ case 2: // Name
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Name")
+ }
+ x.Name = data
+ }
+ }
+ return nil
+}
+func (x *ChainTarget) GetType() ChainTarget_TargetType {
+ if x != nil {
+ return x.Type
+ }
+ return 0
+}
+func (x *ChainTarget) SetType(v ChainTarget_TargetType) {
+ x.Type = v
+}
+func (x *ChainTarget) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+func (x *ChainTarget) SetName(v string) {
+ x.Name = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ChainTarget) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ChainTarget) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"type\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.Type))
+ }
+ {
+ const prefix string = ",\"Name\":"
+ out.RawString(prefix)
+ out.String(x.Name)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ChainTarget) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ChainTarget) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "type":
+ {
+ var f ChainTarget_TargetType
+ var parsedValue ChainTarget_TargetType
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := ChainTarget_TargetType_value[v]; ok {
+ parsedValue = ChainTarget_TargetType(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = ChainTarget_TargetType(vv)
+ case float64:
+ parsedValue = ChainTarget_TargetType(v)
+ }
+ f = parsedValue
+ x.Type = f
+ }
+ case "Name":
+ {
+ var f string
+ f = in.String()
+ x.Name = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
diff --git a/pkg/services/control/types_test.go b/pkg/services/control/types_test.go
deleted file mode 100644
index df0cdf141..000000000
--- a/pkg/services/control/types_test.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package control_test
-
-import (
- "bytes"
- "path/filepath"
- "strconv"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "github.com/google/uuid"
-)
-
-func TestNetmap_StableMarshal(t *testing.T) {
- testStableMarshal(t, generateNetmap(), new(control.Netmap), func(m1, m2 protoMessage) bool {
- return equalNetmaps(m1.(*control.Netmap), m2.(*control.Netmap))
- })
-}
-
-func generateNetmap() *control.Netmap {
- nm := new(control.Netmap)
- nm.SetEpoch(13)
-
- const nodeCount = 2
-
- nodes := make([]*control.NodeInfo, 0, nodeCount)
-
- for i := 0; i < nodeCount; i++ {
- n := new(control.NodeInfo)
- n.SetPublicKey(testData(33))
- n.SetAddresses([]string{testString(), testString()})
- n.SetState(control.NetmapStatus_ONLINE)
-
- const attrCount = 2
-
- attrs := make([]*control.NodeInfo_Attribute, 0, attrCount)
-
- for j := 0; j < attrCount; j++ {
- a := new(control.NodeInfo_Attribute)
- a.SetKey(testString())
- a.SetValue(testString())
-
- const parentsCount = 2
-
- parents := make([]string, 0, parentsCount)
-
- for k := 0; k < parentsCount; k++ {
- parents = append(parents, testString())
- }
-
- a.SetParents(parents)
-
- attrs = append(attrs, a)
- }
-
- n.SetAttributes(attrs)
-
- nodes = append(nodes, n)
- }
-
- nm.SetNodes(nodes)
-
- return nm
-}
-
-func equalNetmaps(nm1, nm2 *control.Netmap) bool {
- if nm1.GetEpoch() != nm2.GetEpoch() {
- return false
- }
-
- n1, n2 := nm1.GetNodes(), nm2.GetNodes()
-
- if len(n1) != len(n2) {
- return false
- }
-
- for i := range n1 {
- if !equalNodeInfos(n1[i], n2[i]) {
- return false
- }
- }
-
- return true
-}
-
-func equalNodeInfos(n1, n2 *control.NodeInfo) bool {
- if !bytes.Equal(n1.GetPublicKey(), n2.GetPublicKey()) ||
- n1.GetState() != n2.GetState() {
- return false
- }
-
- na1, na2 := n1.GetAddresses(), n2.GetAddresses()
-
- if len(na1) != len(na2) {
- return false
- }
-
- for i := range na1 {
- if na1[i] != na2[i] {
- return false
- }
- }
-
- a1, a2 := n1.GetAttributes(), n2.GetAttributes()
-
- if len(a1) != len(a2) {
- return false
- }
-
- for i := range a1 {
- if a1[i].GetKey() != a2[i].GetKey() || a1[i].GetValue() != a2[i].GetValue() {
- return false
- }
-
- p1, p2 := a1[i].GetParents(), a2[i].GetParents()
-
- if len(p1) != len(p2) {
- return false
- }
-
- for j := range p1 {
- if p1[j] != p2[j] {
- return false
- }
- }
- }
-
- return true
-}
-
-func generateShardInfo(id int) *control.ShardInfo {
- si := new(control.ShardInfo)
-
- path := "/nice/dir/awesome/files/" + strconv.Itoa(id)
-
- uid, _ := uuid.NewRandom()
- bin, _ := uid.MarshalBinary()
-
- si.SetID(bin)
- si.SetMode(control.ShardMode_READ_WRITE)
- si.SetMetabasePath(filepath.Join(path, "meta"))
- si.Blobstor = []*control.BlobstorInfo{
- {Type: fstree.Type, Path: filepath.Join(path, "fstree")},
- {Type: blobovniczatree.Type, Path: filepath.Join(path, "blobtree")},
- }
- si.SetWriteCachePath(filepath.Join(path, "writecache"))
- si.SetPiloramaPath(filepath.Join(path, "pilorama"))
-
- return si
-}
diff --git a/pkg/services/tree/service.pb.go b/pkg/services/tree/service.pb.go
deleted file mode 100644
index f439e3f28..000000000
--- a/pkg/services/tree/service.pb.go
+++ /dev/null
@@ -1,3587 +0,0 @@
-//*
-// Service for working with CRDT tree.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.0
-// source: pkg/services/tree/service.proto
-
-package tree
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type GetSubTreeRequest_Body_Order_Direction int32
-
-const (
- GetSubTreeRequest_Body_Order_None GetSubTreeRequest_Body_Order_Direction = 0
- GetSubTreeRequest_Body_Order_Asc GetSubTreeRequest_Body_Order_Direction = 1
-)
-
-// Enum value maps for GetSubTreeRequest_Body_Order_Direction.
-var (
- GetSubTreeRequest_Body_Order_Direction_name = map[int32]string{
- 0: "None",
- 1: "Asc",
- }
- GetSubTreeRequest_Body_Order_Direction_value = map[string]int32{
- "None": 0,
- "Asc": 1,
- }
-)
-
-func (x GetSubTreeRequest_Body_Order_Direction) Enum() *GetSubTreeRequest_Body_Order_Direction {
- p := new(GetSubTreeRequest_Body_Order_Direction)
- *p = x
- return p
-}
-
-func (x GetSubTreeRequest_Body_Order_Direction) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (GetSubTreeRequest_Body_Order_Direction) Descriptor() protoreflect.EnumDescriptor {
- return file_pkg_services_tree_service_proto_enumTypes[0].Descriptor()
-}
-
-func (GetSubTreeRequest_Body_Order_Direction) Type() protoreflect.EnumType {
- return &file_pkg_services_tree_service_proto_enumTypes[0]
-}
-
-func (x GetSubTreeRequest_Body_Order_Direction) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use GetSubTreeRequest_Body_Order_Direction.Descriptor instead.
-func (GetSubTreeRequest_Body_Order_Direction) EnumDescriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10, 0, 0, 0}
-}
-
-type AddRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *AddRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddRequest) Reset() {
- *x = AddRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddRequest) ProtoMessage() {}
-
-func (x *AddRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddRequest.ProtoReflect.Descriptor instead.
-func (*AddRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *AddRequest) GetBody() *AddRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *AddRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type AddResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *AddResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddResponse) Reset() {
- *x = AddResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddResponse) ProtoMessage() {}
-
-func (x *AddResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddResponse.ProtoReflect.Descriptor instead.
-func (*AddResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *AddResponse) GetBody() *AddResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *AddResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type AddByPathRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *AddByPathRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddByPathRequest) Reset() {
- *x = AddByPathRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddByPathRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddByPathRequest) ProtoMessage() {}
-
-func (x *AddByPathRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddByPathRequest.ProtoReflect.Descriptor instead.
-func (*AddByPathRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *AddByPathRequest) GetBody() *AddByPathRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *AddByPathRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type AddByPathResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *AddByPathResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *AddByPathResponse) Reset() {
- *x = AddByPathResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddByPathResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddByPathResponse) ProtoMessage() {}
-
-func (x *AddByPathResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddByPathResponse.ProtoReflect.Descriptor instead.
-func (*AddByPathResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *AddByPathResponse) GetBody() *AddByPathResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *AddByPathResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *RemoveRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveRequest) Reset() {
- *x = RemoveRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveRequest) ProtoMessage() {}
-
-func (x *RemoveRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveRequest.ProtoReflect.Descriptor instead.
-func (*RemoveRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *RemoveRequest) GetBody() *RemoveRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type RemoveResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *RemoveResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RemoveResponse) Reset() {
- *x = RemoveResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveResponse) ProtoMessage() {}
-
-func (x *RemoveResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveResponse.ProtoReflect.Descriptor instead.
-func (*RemoveResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *RemoveResponse) GetBody() *RemoveResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RemoveResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type MoveRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *MoveRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *MoveRequest) Reset() {
- *x = MoveRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveRequest) ProtoMessage() {}
-
-func (x *MoveRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveRequest.ProtoReflect.Descriptor instead.
-func (*MoveRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *MoveRequest) GetBody() *MoveRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *MoveRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type MoveResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *MoveResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *MoveResponse) Reset() {
- *x = MoveResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveResponse) ProtoMessage() {}
-
-func (x *MoveResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveResponse.ProtoReflect.Descriptor instead.
-func (*MoveResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *MoveResponse) GetBody() *MoveResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *MoveResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetNodeByPathRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *GetNodeByPathRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetNodeByPathRequest) Reset() {
- *x = GetNodeByPathRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNodeByPathRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNodeByPathRequest) ProtoMessage() {}
-
-func (x *GetNodeByPathRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNodeByPathRequest.ProtoReflect.Descriptor instead.
-func (*GetNodeByPathRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *GetNodeByPathRequest) GetBody() *GetNodeByPathRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetNodeByPathRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetNodeByPathResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *GetNodeByPathResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetNodeByPathResponse) Reset() {
- *x = GetNodeByPathResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNodeByPathResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNodeByPathResponse) ProtoMessage() {}
-
-func (x *GetNodeByPathResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNodeByPathResponse.ProtoReflect.Descriptor instead.
-func (*GetNodeByPathResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *GetNodeByPathResponse) GetBody() *GetNodeByPathResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetNodeByPathResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetSubTreeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *GetSubTreeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetSubTreeRequest) Reset() {
- *x = GetSubTreeRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetSubTreeRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetSubTreeRequest) ProtoMessage() {}
-
-func (x *GetSubTreeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetSubTreeRequest.ProtoReflect.Descriptor instead.
-func (*GetSubTreeRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *GetSubTreeRequest) GetBody() *GetSubTreeRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetSubTreeRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetSubTreeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *GetSubTreeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetSubTreeResponse) Reset() {
- *x = GetSubTreeResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetSubTreeResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetSubTreeResponse) ProtoMessage() {}
-
-func (x *GetSubTreeResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetSubTreeResponse.ProtoReflect.Descriptor instead.
-func (*GetSubTreeResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *GetSubTreeResponse) GetBody() *GetSubTreeResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetSubTreeResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type TreeListRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *TreeListRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *TreeListRequest) Reset() {
- *x = TreeListRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TreeListRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TreeListRequest) ProtoMessage() {}
-
-func (x *TreeListRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TreeListRequest.ProtoReflect.Descriptor instead.
-func (*TreeListRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *TreeListRequest) GetBody() *TreeListRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *TreeListRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type TreeListResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *TreeListResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *TreeListResponse) Reset() {
- *x = TreeListResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TreeListResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TreeListResponse) ProtoMessage() {}
-
-func (x *TreeListResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TreeListResponse.ProtoReflect.Descriptor instead.
-func (*TreeListResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *TreeListResponse) GetBody() *TreeListResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *TreeListResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type ApplyRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *ApplyRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ApplyRequest) Reset() {
- *x = ApplyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ApplyRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ApplyRequest) ProtoMessage() {}
-
-func (x *ApplyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ApplyRequest.ProtoReflect.Descriptor instead.
-func (*ApplyRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *ApplyRequest) GetBody() *ApplyRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ApplyRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type ApplyResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *ApplyResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *ApplyResponse) Reset() {
- *x = ApplyResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ApplyResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ApplyResponse) ProtoMessage() {}
-
-func (x *ApplyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ApplyResponse.ProtoReflect.Descriptor instead.
-func (*ApplyResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{15}
-}
-
-func (x *ApplyResponse) GetBody() *ApplyResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *ApplyResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetOpLogRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *GetOpLogRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetOpLogRequest) Reset() {
- *x = GetOpLogRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetOpLogRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetOpLogRequest) ProtoMessage() {}
-
-func (x *GetOpLogRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetOpLogRequest.ProtoReflect.Descriptor instead.
-func (*GetOpLogRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{16}
-}
-
-func (x *GetOpLogRequest) GetBody() *GetOpLogRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetOpLogRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type GetOpLogResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *GetOpLogResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *GetOpLogResponse) Reset() {
- *x = GetOpLogResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetOpLogResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetOpLogResponse) ProtoMessage() {}
-
-func (x *GetOpLogResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetOpLogResponse.ProtoReflect.Descriptor instead.
-func (*GetOpLogResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{17}
-}
-
-func (x *GetOpLogResponse) GetBody() *GetOpLogResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *GetOpLogResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type HealthcheckResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Response body.
- Body *HealthcheckResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Response signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthcheckResponse) Reset() {
- *x = HealthcheckResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthcheckResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthcheckResponse) ProtoMessage() {}
-
-func (x *HealthcheckResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthcheckResponse.ProtoReflect.Descriptor instead.
-func (*HealthcheckResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *HealthcheckResponse) GetBody() *HealthcheckResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthcheckResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type HealthcheckRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Request body.
- Body *HealthcheckRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Request signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *HealthcheckRequest) Reset() {
- *x = HealthcheckRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthcheckRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthcheckRequest) ProtoMessage() {}
-
-func (x *HealthcheckRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthcheckRequest.ProtoReflect.Descriptor instead.
-func (*HealthcheckRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{19}
-}
-
-func (x *HealthcheckRequest) GetBody() *HealthcheckRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *HealthcheckRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-type AddRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // ID of the parent to attach node to.
- ParentId uint64 `protobuf:"varint,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
- // Key-Value pairs with meta information.
- Meta []*KeyValue `protobuf:"bytes,4,rep,name=meta,proto3" json:"meta,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,5,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
-}
-
-func (x *AddRequest_Body) Reset() {
- *x = AddRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddRequest_Body) ProtoMessage() {}
-
-func (x *AddRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddRequest_Body.ProtoReflect.Descriptor instead.
-func (*AddRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{0, 0}
-}
-
-func (x *AddRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *AddRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *AddRequest_Body) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-
-func (x *AddRequest_Body) GetMeta() []*KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-func (x *AddRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-type AddResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the created node.
- NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
-}
-
-func (x *AddResponse_Body) Reset() {
- *x = AddResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddResponse_Body) ProtoMessage() {}
-
-func (x *AddResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddResponse_Body.ProtoReflect.Descriptor instead.
-func (*AddResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{1, 0}
-}
-
-func (x *AddResponse_Body) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-
-type AddByPathRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // Attribute to build path with. Default: "FileName".
- PathAttribute string `protobuf:"bytes,3,opt,name=path_attribute,json=pathAttribute,proto3" json:"path_attribute,omitempty"`
- // List of path components.
- Path []string `protobuf:"bytes,4,rep,name=path,proto3" json:"path,omitempty"`
- // Node meta-information.
- Meta []*KeyValue `protobuf:"bytes,5,rep,name=meta,proto3" json:"meta,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,6,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
-}
-
-func (x *AddByPathRequest_Body) Reset() {
- *x = AddByPathRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddByPathRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddByPathRequest_Body) ProtoMessage() {}
-
-func (x *AddByPathRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddByPathRequest_Body.ProtoReflect.Descriptor instead.
-func (*AddByPathRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{2, 0}
-}
-
-func (x *AddByPathRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *AddByPathRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *AddByPathRequest_Body) GetPathAttribute() string {
- if x != nil {
- return x.PathAttribute
- }
- return ""
-}
-
-func (x *AddByPathRequest_Body) GetPath() []string {
- if x != nil {
- return x.Path
- }
- return nil
-}
-
-func (x *AddByPathRequest_Body) GetMeta() []*KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-func (x *AddByPathRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-type AddByPathResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // List of all created nodes. The first one is the leaf.
- Nodes []uint64 `protobuf:"varint,1,rep,packed,name=nodes,proto3" json:"nodes,omitempty"`
- // ID of the parent node where new nodes were attached.
- ParentId uint64 `protobuf:"varint,2,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
-}
-
-func (x *AddByPathResponse_Body) Reset() {
- *x = AddByPathResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddByPathResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddByPathResponse_Body) ProtoMessage() {}
-
-func (x *AddByPathResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddByPathResponse_Body.ProtoReflect.Descriptor instead.
-func (*AddByPathResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{3, 0}
-}
-
-func (x *AddByPathResponse_Body) GetNodes() []uint64 {
- if x != nil {
- return x.Nodes
- }
- return nil
-}
-
-func (x *AddByPathResponse_Body) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-
-type RemoveRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // ID of the node to remove.
- NodeId uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,4,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
-}
-
-func (x *RemoveRequest_Body) Reset() {
- *x = RemoveRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveRequest_Body) ProtoMessage() {}
-
-func (x *RemoveRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveRequest_Body.ProtoReflect.Descriptor instead.
-func (*RemoveRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{4, 0}
-}
-
-func (x *RemoveRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *RemoveRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *RemoveRequest_Body) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-
-func (x *RemoveRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-type RemoveResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *RemoveResponse_Body) Reset() {
- *x = RemoveResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RemoveResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RemoveResponse_Body) ProtoMessage() {}
-
-func (x *RemoveResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RemoveResponse_Body.ProtoReflect.Descriptor instead.
-func (*RemoveResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{5, 0}
-}
-
-type MoveRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // TODO import neo.fs.v2.refs.ContainerID directly.
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // ID of the new parent.
- ParentId uint64 `protobuf:"varint,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
- // ID of the node to move.
- NodeId uint64 `protobuf:"varint,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
- // Node meta-information.
- Meta []*KeyValue `protobuf:"bytes,5,rep,name=meta,proto3" json:"meta,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,6,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
-}
-
-func (x *MoveRequest_Body) Reset() {
- *x = MoveRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveRequest_Body) ProtoMessage() {}
-
-func (x *MoveRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveRequest_Body.ProtoReflect.Descriptor instead.
-func (*MoveRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{6, 0}
-}
-
-func (x *MoveRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *MoveRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *MoveRequest_Body) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-
-func (x *MoveRequest_Body) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-
-func (x *MoveRequest_Body) GetMeta() []*KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-func (x *MoveRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-type MoveResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *MoveResponse_Body) Reset() {
- *x = MoveResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveResponse_Body) ProtoMessage() {}
-
-func (x *MoveResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveResponse_Body.ProtoReflect.Descriptor instead.
-func (*MoveResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{7, 0}
-}
-
-type GetNodeByPathRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // Attribute to build path with. Default: "FileName".
- PathAttribute string `protobuf:"bytes,3,opt,name=path_attribute,json=pathAttribute,proto3" json:"path_attribute,omitempty"`
- // List of path components.
- Path []string `protobuf:"bytes,4,rep,name=path,proto3" json:"path,omitempty"`
- // List of attributes to include in response.
- Attributes []string `protobuf:"bytes,5,rep,name=attributes,proto3" json:"attributes,omitempty"`
- // Flag to return only the latest version of node.
- LatestOnly bool `protobuf:"varint,6,opt,name=latest_only,json=latestOnly,proto3" json:"latest_only,omitempty"`
- // Flag to return all stored attributes.
- AllAttributes bool `protobuf:"varint,7,opt,name=all_attributes,json=allAttributes,proto3" json:"all_attributes,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,8,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
-}
-
-func (x *GetNodeByPathRequest_Body) Reset() {
- *x = GetNodeByPathRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNodeByPathRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNodeByPathRequest_Body) ProtoMessage() {}
-
-func (x *GetNodeByPathRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNodeByPathRequest_Body.ProtoReflect.Descriptor instead.
-func (*GetNodeByPathRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{8, 0}
-}
-
-func (x *GetNodeByPathRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *GetNodeByPathRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *GetNodeByPathRequest_Body) GetPathAttribute() string {
- if x != nil {
- return x.PathAttribute
- }
- return ""
-}
-
-func (x *GetNodeByPathRequest_Body) GetPath() []string {
- if x != nil {
- return x.Path
- }
- return nil
-}
-
-func (x *GetNodeByPathRequest_Body) GetAttributes() []string {
- if x != nil {
- return x.Attributes
- }
- return nil
-}
-
-func (x *GetNodeByPathRequest_Body) GetLatestOnly() bool {
- if x != nil {
- return x.LatestOnly
- }
- return false
-}
-
-func (x *GetNodeByPathRequest_Body) GetAllAttributes() bool {
- if x != nil {
- return x.AllAttributes
- }
- return false
-}
-
-func (x *GetNodeByPathRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-// Information about a single tree node.
-type GetNodeByPathResponse_Info struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Node ID.
- NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
- // Timestamp of the last operation with the node.
- Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- // Node meta-information.
- Meta []*KeyValue `protobuf:"bytes,3,rep,name=meta,proto3" json:"meta,omitempty"`
- // Parent ID.
- ParentId uint64 `protobuf:"varint,4,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
-}
-
-func (x *GetNodeByPathResponse_Info) Reset() {
- *x = GetNodeByPathResponse_Info{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNodeByPathResponse_Info) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNodeByPathResponse_Info) ProtoMessage() {}
-
-func (x *GetNodeByPathResponse_Info) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNodeByPathResponse_Info.ProtoReflect.Descriptor instead.
-func (*GetNodeByPathResponse_Info) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9, 0}
-}
-
-func (x *GetNodeByPathResponse_Info) GetNodeId() uint64 {
- if x != nil {
- return x.NodeId
- }
- return 0
-}
-
-func (x *GetNodeByPathResponse_Info) GetTimestamp() uint64 {
- if x != nil {
- return x.Timestamp
- }
- return 0
-}
-
-func (x *GetNodeByPathResponse_Info) GetMeta() []*KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-func (x *GetNodeByPathResponse_Info) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-
-type GetNodeByPathResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // List of nodes stored by path.
- Nodes []*GetNodeByPathResponse_Info `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
-}
-
-func (x *GetNodeByPathResponse_Body) Reset() {
- *x = GetNodeByPathResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNodeByPathResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNodeByPathResponse_Body) ProtoMessage() {}
-
-func (x *GetNodeByPathResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNodeByPathResponse_Body.ProtoReflect.Descriptor instead.
-func (*GetNodeByPathResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{9, 1}
-}
-
-func (x *GetNodeByPathResponse_Body) GetNodes() []*GetNodeByPathResponse_Info {
- if x != nil {
- return x.Nodes
- }
- return nil
-}
-
-type GetSubTreeRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // IDs of the root nodes of a subtree forest.
- RootId []uint64 `protobuf:"varint,3,rep,name=root_id,json=rootId,proto3" json:"root_id,omitempty"`
- // Optional depth of the traversal. Zero means return only root.
- // Maximum depth is 10.
- Depth uint32 `protobuf:"varint,4,opt,name=depth,proto3" json:"depth,omitempty"`
- // Bearer token in V2 format.
- BearerToken []byte `protobuf:"bytes,5,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"`
- // Result ordering.
- OrderBy *GetSubTreeRequest_Body_Order `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
-}
-
-func (x *GetSubTreeRequest_Body) Reset() {
- *x = GetSubTreeRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetSubTreeRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetSubTreeRequest_Body) ProtoMessage() {}
-
-func (x *GetSubTreeRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetSubTreeRequest_Body.ProtoReflect.Descriptor instead.
-func (*GetSubTreeRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10, 0}
-}
-
-func (x *GetSubTreeRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *GetSubTreeRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *GetSubTreeRequest_Body) GetRootId() []uint64 {
- if x != nil {
- return x.RootId
- }
- return nil
-}
-
-func (x *GetSubTreeRequest_Body) GetDepth() uint32 {
- if x != nil {
- return x.Depth
- }
- return 0
-}
-
-func (x *GetSubTreeRequest_Body) GetBearerToken() []byte {
- if x != nil {
- return x.BearerToken
- }
- return nil
-}
-
-func (x *GetSubTreeRequest_Body) GetOrderBy() *GetSubTreeRequest_Body_Order {
- if x != nil {
- return x.OrderBy
- }
- return nil
-}
-
-type GetSubTreeRequest_Body_Order struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Direction GetSubTreeRequest_Body_Order_Direction `protobuf:"varint,1,opt,name=direction,proto3,enum=tree.GetSubTreeRequest_Body_Order_Direction" json:"direction,omitempty"`
-}
-
-func (x *GetSubTreeRequest_Body_Order) Reset() {
- *x = GetSubTreeRequest_Body_Order{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetSubTreeRequest_Body_Order) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetSubTreeRequest_Body_Order) ProtoMessage() {}
-
-func (x *GetSubTreeRequest_Body_Order) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetSubTreeRequest_Body_Order.ProtoReflect.Descriptor instead.
-func (*GetSubTreeRequest_Body_Order) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{10, 0, 0}
-}
-
-func (x *GetSubTreeRequest_Body_Order) GetDirection() GetSubTreeRequest_Body_Order_Direction {
- if x != nil {
- return x.Direction
- }
- return GetSubTreeRequest_Body_Order_None
-}
-
-type GetSubTreeResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the node.
- NodeId []uint64 `protobuf:"varint,1,rep,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
- // ID of the parent.
- ParentId []uint64 `protobuf:"varint,2,rep,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
- // Time node was first added to a tree.
- Timestamp []uint64 `protobuf:"varint,3,rep,name=timestamp,proto3" json:"timestamp,omitempty"`
- // Node meta-information.
- Meta []*KeyValue `protobuf:"bytes,4,rep,name=meta,proto3" json:"meta,omitempty"`
-}
-
-func (x *GetSubTreeResponse_Body) Reset() {
- *x = GetSubTreeResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetSubTreeResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetSubTreeResponse_Body) ProtoMessage() {}
-
-func (x *GetSubTreeResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetSubTreeResponse_Body.ProtoReflect.Descriptor instead.
-func (*GetSubTreeResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{11, 0}
-}
-
-func (x *GetSubTreeResponse_Body) GetNodeId() []uint64 {
- if x != nil {
- return x.NodeId
- }
- return nil
-}
-
-func (x *GetSubTreeResponse_Body) GetParentId() []uint64 {
- if x != nil {
- return x.ParentId
- }
- return nil
-}
-
-func (x *GetSubTreeResponse_Body) GetTimestamp() []uint64 {
- if x != nil {
- return x.Timestamp
- }
- return nil
-}
-
-func (x *GetSubTreeResponse_Body) GetMeta() []*KeyValue {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-type TreeListRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-}
-
-func (x *TreeListRequest_Body) Reset() {
- *x = TreeListRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TreeListRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TreeListRequest_Body) ProtoMessage() {}
-
-func (x *TreeListRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TreeListRequest_Body.ProtoReflect.Descriptor instead.
-func (*TreeListRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{12, 0}
-}
-
-func (x *TreeListRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-type TreeListResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Tree IDs.
- Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"`
-}
-
-func (x *TreeListResponse_Body) Reset() {
- *x = TreeListResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TreeListResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TreeListResponse_Body) ProtoMessage() {}
-
-func (x *TreeListResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TreeListResponse_Body.ProtoReflect.Descriptor instead.
-func (*TreeListResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{13, 0}
-}
-
-func (x *TreeListResponse_Body) GetIds() []string {
- if x != nil {
- return x.Ids
- }
- return nil
-}
-
-type ApplyRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // Operation to be applied.
- Operation *LogMove `protobuf:"bytes,3,opt,name=operation,proto3" json:"operation,omitempty"`
-}
-
-func (x *ApplyRequest_Body) Reset() {
- *x = ApplyRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ApplyRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ApplyRequest_Body) ProtoMessage() {}
-
-func (x *ApplyRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ApplyRequest_Body.ProtoReflect.Descriptor instead.
-func (*ApplyRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{14, 0}
-}
-
-func (x *ApplyRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *ApplyRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *ApplyRequest_Body) GetOperation() *LogMove {
- if x != nil {
- return x.Operation
- }
- return nil
-}
-
-type ApplyResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ApplyResponse_Body) Reset() {
- *x = ApplyResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ApplyResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ApplyResponse_Body) ProtoMessage() {}
-
-func (x *ApplyResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ApplyResponse_Body.ProtoReflect.Descriptor instead.
-func (*ApplyResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{15, 0}
-}
-
-type GetOpLogRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Container ID in V2 format.
- ContainerId []byte `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
- // The name of the tree.
- TreeId string `protobuf:"bytes,2,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
- // Starting height to return logs from.
- Height uint64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
- // Amount of operations to return.
- Count uint64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
-}
-
-func (x *GetOpLogRequest_Body) Reset() {
- *x = GetOpLogRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetOpLogRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetOpLogRequest_Body) ProtoMessage() {}
-
-func (x *GetOpLogRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetOpLogRequest_Body.ProtoReflect.Descriptor instead.
-func (*GetOpLogRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{16, 0}
-}
-
-func (x *GetOpLogRequest_Body) GetContainerId() []byte {
- if x != nil {
- return x.ContainerId
- }
- return nil
-}
-
-func (x *GetOpLogRequest_Body) GetTreeId() string {
- if x != nil {
- return x.TreeId
- }
- return ""
-}
-
-func (x *GetOpLogRequest_Body) GetHeight() uint64 {
- if x != nil {
- return x.Height
- }
- return 0
-}
-
-func (x *GetOpLogRequest_Body) GetCount() uint64 {
- if x != nil {
- return x.Count
- }
- return 0
-}
-
-type GetOpLogResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Operation on a tree.
- Operation *LogMove `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"`
-}
-
-func (x *GetOpLogResponse_Body) Reset() {
- *x = GetOpLogResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetOpLogResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetOpLogResponse_Body) ProtoMessage() {}
-
-func (x *GetOpLogResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetOpLogResponse_Body.ProtoReflect.Descriptor instead.
-func (*GetOpLogResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{17, 0}
-}
-
-func (x *GetOpLogResponse_Body) GetOperation() *LogMove {
- if x != nil {
- return x.Operation
- }
- return nil
-}
-
-type HealthcheckResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *HealthcheckResponse_Body) Reset() {
- *x = HealthcheckResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[40]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthcheckResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthcheckResponse_Body) ProtoMessage() {}
-
-func (x *HealthcheckResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[40]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthcheckResponse_Body.ProtoReflect.Descriptor instead.
-func (*HealthcheckResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{18, 0}
-}
-
-type HealthcheckRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *HealthcheckRequest_Body) Reset() {
- *x = HealthcheckRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_service_proto_msgTypes[41]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HealthcheckRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HealthcheckRequest_Body) ProtoMessage() {}
-
-func (x *HealthcheckRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_service_proto_msgTypes[41]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HealthcheckRequest_Body.ProtoReflect.Descriptor instead.
-func (*HealthcheckRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_service_proto_rawDescGZIP(), []int{19, 0}
-}
-
-var File_pkg_services_tree_service_proto protoreflect.FileDescriptor
-
-var file_pkg_services_tree_service_proto_rawDesc = []byte{
- 0x0a, 0x1f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74,
- 0x72, 0x65, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x04, 0x74, 0x72, 0x65, 0x65, 0x1a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8f, 0x02, 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0xa6, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
- 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b,
- 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74,
- 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72,
- 0x65, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69,
- 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49,
- 0x64, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
- 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f,
- 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61,
- 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x89, 0x01, 0x0a, 0x0b, 0x41, 0x64, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64,
- 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x1f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6e,
- 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f,
- 0x64, 0x65, 0x49, 0x64, 0x22, 0xb9, 0x02, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61,
- 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41,
- 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
- 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
- 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0xc4, 0x01, 0x0a, 0x04, 0x42, 0x6f,
- 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
- 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x25,
- 0x0a, 0x0e, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x41, 0x74, 0x74, 0x72,
- 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74,
- 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b,
- 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a,
- 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
- 0x22, 0xaf, 0x01, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x42,
- 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f,
- 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72,
- 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x39, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
- 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x05,
- 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f,
- 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x49, 0x64, 0x22, 0xec, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e,
- 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07,
- 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74,
- 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x21,
- 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x22, 0x76, 0x0a, 0x0e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xaa, 0x02, 0x0a, 0x0b, 0x4d, 0x6f,
- 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d,
- 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
- 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e,
- 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x1a, 0xbf, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a,
- 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64,
- 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72,
- 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69,
- 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12,
- 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
- 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6d,
- 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f,
- 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65,
- 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x72, 0x0a, 0x0c, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x6f, 0x76, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0x85, 0x03, 0x0a, 0x14, 0x47,
- 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65,
- 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
- 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72,
- 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x88, 0x02, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
- 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e,
- 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62,
- 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69,
- 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x74, 0x74,
- 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x74, 0x65, 0x73,
- 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x6c, 0x61,
- 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x5f,
- 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12,
- 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b,
- 0x65, 0x6e, 0x22, 0xbc, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79,
- 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x72, 0x65,
- 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x7e, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64,
- 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65,
- 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e,
- 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04,
- 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69,
- 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49,
- 0x64, 0x1a, 0x3e, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x6e, 0x6f, 0x64,
- 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e,
- 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65,
- 0x73, 0x22, 0xc3, 0x03, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74,
- 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74,
- 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0xcc, 0x02, 0x0a, 0x04, 0x42, 0x6f, 0x64,
- 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
- 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a,
- 0x07, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02,
- 0x10, 0x00, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65,
- 0x70, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68,
- 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f,
- 0x6b, 0x65, 0x6e, 0x12, 0x3d, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74,
- 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72,
- 0x42, 0x79, 0x1a, 0x73, 0x0a, 0x05, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x09, 0x64,
- 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c,
- 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x4f, 0x72, 0x64,
- 0x65, 0x72, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69,
- 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1e, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x07,
- 0x0a, 0x03, 0x41, 0x73, 0x63, 0x10, 0x01, 0x22, 0x83, 0x02, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53,
- 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74,
- 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x1a, 0x8a, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x1b, 0x0a, 0x07, 0x6e, 0x6f, 0x64,
- 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x00, 0x52, 0x06,
- 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x00, 0x52, 0x08, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x00, 0x52, 0x09,
- 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x22, 0x0a, 0x04, 0x6d, 0x65, 0x74,
- 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4b,
- 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0x9b, 0x01,
- 0x0a, 0x0f, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x1a, 0x29, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
- 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b,
- 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x8c, 0x01, 0x0a, 0x10,
- 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
- 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x1a, 0x18, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0xdb, 0x01, 0x0a, 0x0c, 0x41,
- 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65,
- 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
- 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72,
- 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
- 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x6f, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
- 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
- 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x09, 0x6f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d,
- 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x09, 0x6f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x74, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x6c,
- 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41,
- 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64,
- 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65,
- 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xe2,
- 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
- 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x1a, 0x70, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e,
- 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07,
- 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74,
- 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a,
- 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x22, 0xa7, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65,
- 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74,
- 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x33, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x12, 0x2b, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x4d, 0x6f,
- 0x76, 0x65, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x80, 0x01,
- 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74,
- 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x22, 0x7e, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c,
- 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74,
- 0x72, 0x65, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x32, 0xd6, 0x04, 0x0a, 0x0b, 0x54, 0x72, 0x65, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x12, 0x2a, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x12, 0x10, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41,
- 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x74, 0x72, 0x65, 0x65,
- 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x09,
- 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x74, 0x72, 0x65, 0x65,
- 0x2e, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x42, 0x79, 0x50, 0x61,
- 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x52, 0x65,
- 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x13, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x72, 0x65, 0x65,
- 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x2d, 0x0a, 0x04, 0x4d, 0x6f, 0x76, 0x65, 0x12, 0x11, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x4d,
- 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x74, 0x72, 0x65,
- 0x65, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48,
- 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12,
- 0x1a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79,
- 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x72,
- 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x79, 0x50, 0x61, 0x74, 0x68,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53,
- 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x12, 0x17, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65,
- 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x54, 0x72, 0x65,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x08, 0x54,
- 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54,
- 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16,
- 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x12,
- 0x12, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4f,
- 0x70, 0x4c, 0x6f, 0x67, 0x12, 0x15, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f,
- 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x74, 0x72,
- 0x65, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63,
- 0x68, 0x65, 0x63, 0x6b, 0x12, 0x18, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c,
- 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19,
- 0x2e, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63,
- 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74,
- 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72,
- 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74,
- 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
-}
-
-var (
- file_pkg_services_tree_service_proto_rawDescOnce sync.Once
- file_pkg_services_tree_service_proto_rawDescData = file_pkg_services_tree_service_proto_rawDesc
-)
-
-func file_pkg_services_tree_service_proto_rawDescGZIP() []byte {
- file_pkg_services_tree_service_proto_rawDescOnce.Do(func() {
- file_pkg_services_tree_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_tree_service_proto_rawDescData)
- })
- return file_pkg_services_tree_service_proto_rawDescData
-}
-
-var file_pkg_services_tree_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_pkg_services_tree_service_proto_msgTypes = make([]protoimpl.MessageInfo, 42)
-var file_pkg_services_tree_service_proto_goTypes = []interface{}{
- (GetSubTreeRequest_Body_Order_Direction)(0), // 0: tree.GetSubTreeRequest.Body.Order.Direction
- (*AddRequest)(nil), // 1: tree.AddRequest
- (*AddResponse)(nil), // 2: tree.AddResponse
- (*AddByPathRequest)(nil), // 3: tree.AddByPathRequest
- (*AddByPathResponse)(nil), // 4: tree.AddByPathResponse
- (*RemoveRequest)(nil), // 5: tree.RemoveRequest
- (*RemoveResponse)(nil), // 6: tree.RemoveResponse
- (*MoveRequest)(nil), // 7: tree.MoveRequest
- (*MoveResponse)(nil), // 8: tree.MoveResponse
- (*GetNodeByPathRequest)(nil), // 9: tree.GetNodeByPathRequest
- (*GetNodeByPathResponse)(nil), // 10: tree.GetNodeByPathResponse
- (*GetSubTreeRequest)(nil), // 11: tree.GetSubTreeRequest
- (*GetSubTreeResponse)(nil), // 12: tree.GetSubTreeResponse
- (*TreeListRequest)(nil), // 13: tree.TreeListRequest
- (*TreeListResponse)(nil), // 14: tree.TreeListResponse
- (*ApplyRequest)(nil), // 15: tree.ApplyRequest
- (*ApplyResponse)(nil), // 16: tree.ApplyResponse
- (*GetOpLogRequest)(nil), // 17: tree.GetOpLogRequest
- (*GetOpLogResponse)(nil), // 18: tree.GetOpLogResponse
- (*HealthcheckResponse)(nil), // 19: tree.HealthcheckResponse
- (*HealthcheckRequest)(nil), // 20: tree.HealthcheckRequest
- (*AddRequest_Body)(nil), // 21: tree.AddRequest.Body
- (*AddResponse_Body)(nil), // 22: tree.AddResponse.Body
- (*AddByPathRequest_Body)(nil), // 23: tree.AddByPathRequest.Body
- (*AddByPathResponse_Body)(nil), // 24: tree.AddByPathResponse.Body
- (*RemoveRequest_Body)(nil), // 25: tree.RemoveRequest.Body
- (*RemoveResponse_Body)(nil), // 26: tree.RemoveResponse.Body
- (*MoveRequest_Body)(nil), // 27: tree.MoveRequest.Body
- (*MoveResponse_Body)(nil), // 28: tree.MoveResponse.Body
- (*GetNodeByPathRequest_Body)(nil), // 29: tree.GetNodeByPathRequest.Body
- (*GetNodeByPathResponse_Info)(nil), // 30: tree.GetNodeByPathResponse.Info
- (*GetNodeByPathResponse_Body)(nil), // 31: tree.GetNodeByPathResponse.Body
- (*GetSubTreeRequest_Body)(nil), // 32: tree.GetSubTreeRequest.Body
- (*GetSubTreeRequest_Body_Order)(nil), // 33: tree.GetSubTreeRequest.Body.Order
- (*GetSubTreeResponse_Body)(nil), // 34: tree.GetSubTreeResponse.Body
- (*TreeListRequest_Body)(nil), // 35: tree.TreeListRequest.Body
- (*TreeListResponse_Body)(nil), // 36: tree.TreeListResponse.Body
- (*ApplyRequest_Body)(nil), // 37: tree.ApplyRequest.Body
- (*ApplyResponse_Body)(nil), // 38: tree.ApplyResponse.Body
- (*GetOpLogRequest_Body)(nil), // 39: tree.GetOpLogRequest.Body
- (*GetOpLogResponse_Body)(nil), // 40: tree.GetOpLogResponse.Body
- (*HealthcheckResponse_Body)(nil), // 41: tree.HealthcheckResponse.Body
- (*HealthcheckRequest_Body)(nil), // 42: tree.HealthcheckRequest.Body
- (*Signature)(nil), // 43: tree.Signature
- (*KeyValue)(nil), // 44: tree.KeyValue
- (*LogMove)(nil), // 45: tree.LogMove
-}
-var file_pkg_services_tree_service_proto_depIdxs = []int32{
- 21, // 0: tree.AddRequest.body:type_name -> tree.AddRequest.Body
- 43, // 1: tree.AddRequest.signature:type_name -> tree.Signature
- 22, // 2: tree.AddResponse.body:type_name -> tree.AddResponse.Body
- 43, // 3: tree.AddResponse.signature:type_name -> tree.Signature
- 23, // 4: tree.AddByPathRequest.body:type_name -> tree.AddByPathRequest.Body
- 43, // 5: tree.AddByPathRequest.signature:type_name -> tree.Signature
- 24, // 6: tree.AddByPathResponse.body:type_name -> tree.AddByPathResponse.Body
- 43, // 7: tree.AddByPathResponse.signature:type_name -> tree.Signature
- 25, // 8: tree.RemoveRequest.body:type_name -> tree.RemoveRequest.Body
- 43, // 9: tree.RemoveRequest.signature:type_name -> tree.Signature
- 26, // 10: tree.RemoveResponse.body:type_name -> tree.RemoveResponse.Body
- 43, // 11: tree.RemoveResponse.signature:type_name -> tree.Signature
- 27, // 12: tree.MoveRequest.body:type_name -> tree.MoveRequest.Body
- 43, // 13: tree.MoveRequest.signature:type_name -> tree.Signature
- 28, // 14: tree.MoveResponse.body:type_name -> tree.MoveResponse.Body
- 43, // 15: tree.MoveResponse.signature:type_name -> tree.Signature
- 29, // 16: tree.GetNodeByPathRequest.body:type_name -> tree.GetNodeByPathRequest.Body
- 43, // 17: tree.GetNodeByPathRequest.signature:type_name -> tree.Signature
- 31, // 18: tree.GetNodeByPathResponse.body:type_name -> tree.GetNodeByPathResponse.Body
- 43, // 19: tree.GetNodeByPathResponse.signature:type_name -> tree.Signature
- 32, // 20: tree.GetSubTreeRequest.body:type_name -> tree.GetSubTreeRequest.Body
- 43, // 21: tree.GetSubTreeRequest.signature:type_name -> tree.Signature
- 34, // 22: tree.GetSubTreeResponse.body:type_name -> tree.GetSubTreeResponse.Body
- 43, // 23: tree.GetSubTreeResponse.signature:type_name -> tree.Signature
- 35, // 24: tree.TreeListRequest.body:type_name -> tree.TreeListRequest.Body
- 43, // 25: tree.TreeListRequest.signature:type_name -> tree.Signature
- 36, // 26: tree.TreeListResponse.body:type_name -> tree.TreeListResponse.Body
- 43, // 27: tree.TreeListResponse.signature:type_name -> tree.Signature
- 37, // 28: tree.ApplyRequest.body:type_name -> tree.ApplyRequest.Body
- 43, // 29: tree.ApplyRequest.signature:type_name -> tree.Signature
- 38, // 30: tree.ApplyResponse.body:type_name -> tree.ApplyResponse.Body
- 43, // 31: tree.ApplyResponse.signature:type_name -> tree.Signature
- 39, // 32: tree.GetOpLogRequest.body:type_name -> tree.GetOpLogRequest.Body
- 43, // 33: tree.GetOpLogRequest.signature:type_name -> tree.Signature
- 40, // 34: tree.GetOpLogResponse.body:type_name -> tree.GetOpLogResponse.Body
- 43, // 35: tree.GetOpLogResponse.signature:type_name -> tree.Signature
- 41, // 36: tree.HealthcheckResponse.body:type_name -> tree.HealthcheckResponse.Body
- 43, // 37: tree.HealthcheckResponse.signature:type_name -> tree.Signature
- 42, // 38: tree.HealthcheckRequest.body:type_name -> tree.HealthcheckRequest.Body
- 43, // 39: tree.HealthcheckRequest.signature:type_name -> tree.Signature
- 44, // 40: tree.AddRequest.Body.meta:type_name -> tree.KeyValue
- 44, // 41: tree.AddByPathRequest.Body.meta:type_name -> tree.KeyValue
- 44, // 42: tree.MoveRequest.Body.meta:type_name -> tree.KeyValue
- 44, // 43: tree.GetNodeByPathResponse.Info.meta:type_name -> tree.KeyValue
- 30, // 44: tree.GetNodeByPathResponse.Body.nodes:type_name -> tree.GetNodeByPathResponse.Info
- 33, // 45: tree.GetSubTreeRequest.Body.order_by:type_name -> tree.GetSubTreeRequest.Body.Order
- 0, // 46: tree.GetSubTreeRequest.Body.Order.direction:type_name -> tree.GetSubTreeRequest.Body.Order.Direction
- 44, // 47: tree.GetSubTreeResponse.Body.meta:type_name -> tree.KeyValue
- 45, // 48: tree.ApplyRequest.Body.operation:type_name -> tree.LogMove
- 45, // 49: tree.GetOpLogResponse.Body.operation:type_name -> tree.LogMove
- 1, // 50: tree.TreeService.Add:input_type -> tree.AddRequest
- 3, // 51: tree.TreeService.AddByPath:input_type -> tree.AddByPathRequest
- 5, // 52: tree.TreeService.Remove:input_type -> tree.RemoveRequest
- 7, // 53: tree.TreeService.Move:input_type -> tree.MoveRequest
- 9, // 54: tree.TreeService.GetNodeByPath:input_type -> tree.GetNodeByPathRequest
- 11, // 55: tree.TreeService.GetSubTree:input_type -> tree.GetSubTreeRequest
- 13, // 56: tree.TreeService.TreeList:input_type -> tree.TreeListRequest
- 15, // 57: tree.TreeService.Apply:input_type -> tree.ApplyRequest
- 17, // 58: tree.TreeService.GetOpLog:input_type -> tree.GetOpLogRequest
- 20, // 59: tree.TreeService.Healthcheck:input_type -> tree.HealthcheckRequest
- 2, // 60: tree.TreeService.Add:output_type -> tree.AddResponse
- 4, // 61: tree.TreeService.AddByPath:output_type -> tree.AddByPathResponse
- 6, // 62: tree.TreeService.Remove:output_type -> tree.RemoveResponse
- 8, // 63: tree.TreeService.Move:output_type -> tree.MoveResponse
- 10, // 64: tree.TreeService.GetNodeByPath:output_type -> tree.GetNodeByPathResponse
- 12, // 65: tree.TreeService.GetSubTree:output_type -> tree.GetSubTreeResponse
- 14, // 66: tree.TreeService.TreeList:output_type -> tree.TreeListResponse
- 16, // 67: tree.TreeService.Apply:output_type -> tree.ApplyResponse
- 18, // 68: tree.TreeService.GetOpLog:output_type -> tree.GetOpLogResponse
- 19, // 69: tree.TreeService.Healthcheck:output_type -> tree.HealthcheckResponse
- 60, // [60:70] is the sub-list for method output_type
- 50, // [50:60] is the sub-list for method input_type
- 50, // [50:50] is the sub-list for extension type_name
- 50, // [50:50] is the sub-list for extension extendee
- 0, // [0:50] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_tree_service_proto_init() }
-func file_pkg_services_tree_service_proto_init() {
- if File_pkg_services_tree_service_proto != nil {
- return
- }
- file_pkg_services_tree_types_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_tree_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddByPathRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddByPathResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MoveRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MoveResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNodeByPathRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNodeByPathResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSubTreeRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSubTreeResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TreeListRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TreeListResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetOpLogRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetOpLogResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthcheckResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthcheckRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddByPathRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddByPathResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MoveRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MoveResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNodeByPathRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNodeByPathResponse_Info); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNodeByPathResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSubTreeRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSubTreeRequest_Body_Order); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSubTreeResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TreeListRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TreeListResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetOpLogRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetOpLogResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthcheckResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_service_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthcheckRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_tree_service_proto_rawDesc,
- NumEnums: 1,
- NumMessages: 42,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_pkg_services_tree_service_proto_goTypes,
- DependencyIndexes: file_pkg_services_tree_service_proto_depIdxs,
- EnumInfos: file_pkg_services_tree_service_proto_enumTypes,
- MessageInfos: file_pkg_services_tree_service_proto_msgTypes,
- }.Build()
- File_pkg_services_tree_service_proto = out.File
- file_pkg_services_tree_service_proto_rawDesc = nil
- file_pkg_services_tree_service_proto_goTypes = nil
- file_pkg_services_tree_service_proto_depIdxs = nil
-}
diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go
index 1a49c5c0c..c4d44253d 100644
--- a/pkg/services/tree/service_frostfs.pb.go
+++ b/pkg/services/tree/service_frostfs.pb.go
@@ -3,11 +3,32 @@
package tree
import (
- binary "encoding/binary"
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
protowire "google.golang.org/protobuf/encoding/protowire"
+ strconv "strconv"
)
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+type AddRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ ParentId uint64 `json:"parentId"`
+ Meta []*KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddRequest_Body)(nil)
+ _ json.Marshaler = (*AddRequest_Body)(nil)
+ _ json.Unmarshaler = (*AddRequest_Body)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
@@ -26,32 +47,262 @@ func (x *AddRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.ParentId)
- for i := range x.Meta {
- offset += proto.NestedStructureMarshal(4, buf[offset:], x.Meta[i])
- }
- offset += proto.BytesMarshal(5, buf[offset:], x.BearerToken)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.ParentId != 0 {
+ mm.AppendUint64(3, x.ParentId)
+ }
+ for i := range x.Meta {
+ if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
+ }
+ }
+ if len(x.BearerToken) != 0 {
+ mm.AppendBytes(5, x.BearerToken)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = data
+ case 4: // Meta
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = append(x.Meta, new(KeyValue))
+ ff := x.Meta[len(x.Meta)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 5: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ }
+ }
+ return nil
+}
+func (x *AddRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *AddRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *AddRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *AddRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *AddRequest_Body) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+func (x *AddRequest_Body) SetParentId(v uint64) {
+ x.ParentId = v
+}
+func (x *AddRequest_Body) GetMeta() []*KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *AddRequest_Body) SetMeta(v []*KeyValue) {
+ x.Meta = v
+}
+func (x *AddRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *AddRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
+ }
+ {
+ const prefix string = ",\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ const prefix string = ",\"parentId\":"
+ out.RawString(prefix)
+ out.Uint64(x.ParentId)
+ }
+ {
+ const prefix string = ",\"meta\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Meta {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Meta[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"bearerToken\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.BearerToken)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "parentId":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.ParentId = f
+ }
+ case "meta":
+ {
+ var f *KeyValue
+ var list []*KeyValue
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(KeyValue)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Meta = list
+ in.Delim(']')
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.BearerToken = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddRequest struct {
+ Body *AddRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddRequest)(nil)
+ _ json.Marshaler = (*AddRequest)(nil)
+ _ json.Unmarshaler = (*AddRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -64,27 +315,6 @@ func (x *AddRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -100,13 +330,164 @@ func (x *AddRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *AddRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(AddRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *AddRequest) GetBody() *AddRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddRequest) SetBody(v *AddRequest_Body) {
+ x.Body = v
+}
+func (x *AddRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddRequest_Body
+ f = new(AddRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddResponse_Body struct {
+ NodeId uint64 `json:"nodeId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddResponse_Body)(nil)
+ _ json.Marshaler = (*AddResponse_Body)(nil)
+ _ json.Unmarshaler = (*AddResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -118,26 +499,125 @@ func (x *AddResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt64Marshal(1, buf[offset:], x.NodeId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.NodeId != 0 {
+ mm.AppendUint64(1, x.NodeId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // NodeId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NodeId")
+ }
+ x.NodeId = data
+ }
+ }
+ return nil
+}
+func (x *AddResponse_Body) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+func (x *AddResponse_Body) SetNodeId(v uint64) {
+ x.NodeId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"nodeId\":"
+ out.RawString(prefix[1:])
+ out.Uint64(x.NodeId)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "nodeId":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.NodeId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddResponse struct {
+ Body *AddResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddResponse)(nil)
+ _ json.Marshaler = (*AddResponse)(nil)
+ _ json.Unmarshaler = (*AddResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -150,27 +630,6 @@ func (x *AddResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -186,13 +645,169 @@ func (x *AddResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *AddResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(AddResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *AddResponse) GetBody() *AddResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddResponse) SetBody(v *AddResponse_Body) {
+ x.Body = v
+}
+func (x *AddResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddResponse_Body
+ f = new(AddResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type AddByPathRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ PathAttribute string `json:"pathAttribute"`
+ Path []string `json:"path"`
+ Meta []*KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*AddByPathRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*AddByPathRequest_Body)(nil)
+ _ json.Marshaler = (*AddByPathRequest_Body)(nil)
+ _ json.Unmarshaler = (*AddByPathRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -211,33 +826,305 @@ func (x *AddByPathRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddByPathRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.StringMarshal(3, buf[offset:], x.PathAttribute)
- offset += proto.RepeatedStringMarshal(4, buf[offset:], x.Path)
- for i := range x.Meta {
- offset += proto.NestedStructureMarshal(5, buf[offset:], x.Meta[i])
- }
- offset += proto.BytesMarshal(6, buf[offset:], x.BearerToken)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddByPathRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+// EmitProtobuf appends the fields of x to mm in ascending field-number
+// order, omitting zero-valued fields as proto3 encoding does.
+// A nil receiver emits nothing.
+func (x *AddByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+	if x == nil {
+		return
+	}
+	if len(x.ContainerId) != 0 {
+		mm.AppendBytes(1, x.ContainerId)
+	}
+	if len(x.TreeId) != 0 {
+		mm.AppendString(2, x.TreeId)
+	}
+	if len(x.PathAttribute) != 0 {
+		mm.AppendString(3, x.PathAttribute)
+	}
+	// Repeated string: one record per element (strings are never packed).
+	for j := range x.Path {
+		mm.AppendString(4, x.Path[j])
+	}
+	// Nested messages with zero StableSize are treated as absent.
+	for i := range x.Meta {
+		if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+			x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
+		}
+	}
+	if len(x.BearerToken) != 0 {
+		mm.AppendBytes(6, x.BearerToken)
+	}
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddByPathRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // PathAttribute
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "PathAttribute")
+ }
+ x.PathAttribute = data
+ case 4: // Path
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Path")
+ }
+ x.Path = append(x.Path, data)
+ case 5: // Meta
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = append(x.Meta, new(KeyValue))
+ ff := x.Meta[len(x.Meta)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 6: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ }
+ }
+ return nil
+}
+func (x *AddByPathRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *AddByPathRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *AddByPathRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *AddByPathRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *AddByPathRequest_Body) GetPathAttribute() string {
+ if x != nil {
+ return x.PathAttribute
+ }
+ return ""
+}
+func (x *AddByPathRequest_Body) SetPathAttribute(v string) {
+ x.PathAttribute = v
+}
+func (x *AddByPathRequest_Body) GetPath() []string {
+ if x != nil {
+ return x.Path
+ }
+ return nil
+}
+func (x *AddByPathRequest_Body) SetPath(v []string) {
+ x.Path = v
+}
+func (x *AddByPathRequest_Body) GetMeta() []*KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *AddByPathRequest_Body) SetMeta(v []*KeyValue) {
+ x.Meta = v
+}
+func (x *AddByPathRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *AddByPathRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddByPathRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
+ }
+ {
+ const prefix string = ",\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ const prefix string = ",\"pathAttribute\":"
+ out.RawString(prefix)
+ out.String(x.PathAttribute)
+ }
+ {
+ const prefix string = ",\"path\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Path {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Path[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"meta\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Meta {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Meta[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"bearerToken\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.BearerToken)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddByPathRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "pathAttribute":
+ {
+ var f string
+ f = in.String()
+ x.PathAttribute = f
+ }
+ case "path":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Path = list
+ in.Delim(']')
+ }
+ case "meta":
+ {
+ var f *KeyValue
+ var list []*KeyValue
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(KeyValue)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Meta = list
+ in.Delim(']')
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.BearerToken = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+// AddByPathRequest is the signed envelope for an AddByPath call: the
+// request body plus the signature covering it (see ReadSignedData).
+type AddByPathRequest struct {
+	Body      *AddByPathRequest_Body `json:"body"`
+	Signature *Signature             `json:"signature"`
+}
+
+// Compile-time interface compliance checks.
+var (
+	_ encoding.ProtoMarshaler   = (*AddByPathRequest)(nil)
+	_ encoding.ProtoUnmarshaler = (*AddByPathRequest)(nil)
+	_ json.Marshaler            = (*AddByPathRequest)(nil)
+	_ json.Unmarshaler          = (*AddByPathRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -250,27 +1137,6 @@ func (x *AddByPathRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddByPathRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -286,13 +1152,165 @@ func (x *AddByPathRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddByPathRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *AddByPathRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddByPathRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+// EmitProtobuf appends the non-empty fields of x to mm in field order.
+// Nested messages whose StableSize is zero are treated as absent.
+func (x *AddByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+	if x == nil {
+		return
+	}
+	if x.Body != nil && x.Body.StableSize() != 0 {
+		x.Body.EmitProtobuf(mm.AppendMessage(1))
+	}
+	if x.Signature != nil && x.Signature.StableSize() != 0 {
+		x.Signature.EmitProtobuf(mm.AppendMessage(2))
+	}
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+// Fields may appear in any order; field numbers not listed in the switch
+// are consumed by NextField and silently ignored.
+// NOTE(review): decoded sub-slices may alias src — confirm callers keep
+// src alive/immutable for the lifetime of x.
+func (x *AddByPathRequest) UnmarshalProtobuf(src []byte) (err error) {
+	var fc easyproto.FieldContext
+	for len(src) > 0 {
+		src, err = fc.NextField(src)
+		if err != nil {
+			return fmt.Errorf("cannot read next field in %s", "AddByPathRequest")
+		}
+		switch fc.FieldNum {
+		case 1: // Body
+			data, ok := fc.MessageData()
+			if !ok {
+				return fmt.Errorf("cannot unmarshal field %s", "Body")
+			}
+			x.Body = new(AddByPathRequest_Body)
+			if err := x.Body.UnmarshalProtobuf(data); err != nil {
+				return fmt.Errorf("unmarshal: %w", err)
+			}
+		case 2: // Signature
+			data, ok := fc.MessageData()
+			if !ok {
+				return fmt.Errorf("cannot unmarshal field %s", "Signature")
+			}
+			x.Signature = new(Signature)
+			if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+				return fmt.Errorf("unmarshal: %w", err)
+			}
+		}
+	}
+	return nil
+}
+func (x *AddByPathRequest) GetBody() *AddByPathRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddByPathRequest) SetBody(v *AddByPathRequest_Body) {
+ x.Body = v
+}
+func (x *AddByPathRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddByPathRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddByPathRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddByPathRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddByPathRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddByPathRequest_Body
+ f = new(AddByPathRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+// AddByPathResponse_Body is the payload returned by AddByPath: a list of
+// node IDs plus a parent node ID (exact semantics are defined by the
+// service proto — confirm there).
+type AddByPathResponse_Body struct {
+	Nodes    []uint64 `json:"nodes"`
+	ParentId uint64   `json:"parentId"`
+}
+
+// Compile-time interface compliance checks.
+var (
+	_ encoding.ProtoMarshaler   = (*AddByPathResponse_Body)(nil)
+	_ encoding.ProtoUnmarshaler = (*AddByPathResponse_Body)(nil)
+	_ json.Marshaler            = (*AddByPathResponse_Body)(nil)
+	_ json.Unmarshaler          = (*AddByPathResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -307,27 +1325,168 @@ func (x *AddByPathResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddByPathResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedUInt64Marshal(1, buf[offset:], x.Nodes)
- offset += proto.UInt64Marshal(2, buf[offset:], x.ParentId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddByPathResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+// EmitProtobuf appends the non-zero fields of x to mm in field order.
+func (x *AddByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+	if x == nil {
+		return
+	}
+	if len(x.Nodes) != 0 {
+		// Repeated uint64 emitted via AppendUint64s (packed encoding).
+		mm.AppendUint64s(1, x.Nodes)
+	}
+	if x.ParentId != 0 {
+		mm.AppendUint64(2, x.ParentId)
+	}
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddByPathResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Nodes
+ data, ok := fc.UnpackUint64s(nil)
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Nodes")
+ }
+ x.Nodes = data
+ case 2: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = data
+ }
+ }
+ return nil
+}
+func (x *AddByPathResponse_Body) GetNodes() []uint64 {
+ if x != nil {
+ return x.Nodes
+ }
+ return nil
+}
+func (x *AddByPathResponse_Body) SetNodes(v []uint64) {
+ x.Nodes = v
+}
+func (x *AddByPathResponse_Body) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+func (x *AddByPathResponse_Body) SetParentId(v uint64) {
+ x.ParentId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddByPathResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"nodes\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Nodes {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Uint64(x.Nodes[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"parentId\":"
+ out.RawString(prefix)
+ out.Uint64(x.ParentId)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddByPathResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "nodes":
+ {
+ var f uint64
+ var list []uint64
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Uint64()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Nodes = list
+ in.Delim(']')
+ }
+ case "parentId":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.ParentId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+// AddByPathResponse is the signed envelope for an AddByPath reply: the
+// response body plus the signature covering it (see ReadSignedData).
+type AddByPathResponse struct {
+	Body      *AddByPathResponse_Body `json:"body"`
+	Signature *Signature              `json:"signature"`
+}
+
+// Compile-time interface compliance checks.
+var (
+	_ encoding.ProtoMarshaler   = (*AddByPathResponse)(nil)
+	_ encoding.ProtoUnmarshaler = (*AddByPathResponse)(nil)
+	_ json.Marshaler            = (*AddByPathResponse)(nil)
+	_ json.Unmarshaler          = (*AddByPathResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -340,27 +1499,6 @@ func (x *AddByPathResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *AddByPathResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -376,13 +1514,167 @@ func (x *AddByPathResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *AddByPathResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *AddByPathResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *AddByPathResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *AddByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *AddByPathResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "AddByPathResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(AddByPathResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *AddByPathResponse) GetBody() *AddByPathResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *AddByPathResponse) SetBody(v *AddByPathResponse_Body) {
+ x.Body = v
+}
+func (x *AddByPathResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *AddByPathResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *AddByPathResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *AddByPathResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *AddByPathResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *AddByPathResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *AddByPathResponse_Body
+ f = new(AddByPathResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+// RemoveRequest_Body identifies a single tree node to remove by
+// container, tree and node ID. BearerToken is optional (presumably an
+// authorization token — confirm against the service proto).
+type RemoveRequest_Body struct {
+	ContainerId []byte `json:"containerId"`
+	TreeId      string `json:"treeId"`
+	NodeId      uint64 `json:"nodeId"`
+	BearerToken []byte `json:"bearerToken"`
+}
+
+// Compile-time interface compliance checks.
+var (
+	_ encoding.ProtoMarshaler   = (*RemoveRequest_Body)(nil)
+	_ encoding.ProtoUnmarshaler = (*RemoveRequest_Body)(nil)
+	_ json.Marshaler            = (*RemoveRequest_Body)(nil)
+	_ json.Unmarshaler          = (*RemoveRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -397,29 +1689,212 @@ func (x *RemoveRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.NodeId)
- offset += proto.BytesMarshal(4, buf[offset:], x.BearerToken)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.NodeId != 0 {
+ mm.AppendUint64(3, x.NodeId)
+ }
+ if len(x.BearerToken) != 0 {
+ mm.AppendBytes(4, x.BearerToken)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // NodeId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NodeId")
+ }
+ x.NodeId = data
+ case 4: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ }
+ }
+ return nil
+}
+func (x *RemoveRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *RemoveRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *RemoveRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *RemoveRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *RemoveRequest_Body) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+func (x *RemoveRequest_Body) SetNodeId(v uint64) {
+ x.NodeId = v
+}
+func (x *RemoveRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *RemoveRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
+ }
+ {
+ const prefix string = ",\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ const prefix string = ",\"nodeId\":"
+ out.RawString(prefix)
+ out.Uint64(x.NodeId)
+ }
+ {
+ const prefix string = ",\"bearerToken\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.BearerToken)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "nodeId":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.NodeId = f
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.BearerToken = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+// RemoveRequest is the signed envelope for a Remove call: the request
+// body plus the signature covering it (see ReadSignedData).
+type RemoveRequest struct {
+	Body      *RemoveRequest_Body `json:"body"`
+	Signature *Signature          `json:"signature"`
+}
+
+// Compile-time interface compliance checks.
+var (
+	_ encoding.ProtoMarshaler   = (*RemoveRequest)(nil)
+	_ encoding.ProtoUnmarshaler = (*RemoveRequest)(nil)
+	_ json.Marshaler            = (*RemoveRequest)(nil)
+	_ json.Unmarshaler          = (*RemoveRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -432,27 +1907,6 @@ func (x *RemoveRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -468,13 +1922,163 @@ func (x *RemoveRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *RemoveRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveRequest) GetBody() *RemoveRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveRequest) SetBody(v *RemoveRequest_Body) {
+ x.Body = v
+}
+func (x *RemoveRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveRequest_Body
+ f = new(RemoveRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+// RemoveResponse_Body is intentionally empty: the Remove reply carries no
+// payload beyond its envelope.
+type RemoveResponse_Body struct {
+}
+
+// Compile-time interface compliance checks.
+var (
+	_ encoding.ProtoMarshaler   = (*RemoveResponse_Body)(nil)
+	_ encoding.ProtoUnmarshaler = (*RemoveResponse_Body)(nil)
+	_ json.Marshaler            = (*RemoveResponse_Body)(nil)
+	_ json.Unmarshaler          = (*RemoveResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -485,18 +2089,93 @@ func (x *RemoveResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type RemoveResponse struct {
+ Body *RemoveResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*RemoveResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*RemoveResponse)(nil)
+ _ json.Marshaler = (*RemoveResponse)(nil)
+ _ json.Unmarshaler = (*RemoveResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -509,27 +2188,6 @@ func (x *RemoveResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RemoveResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -545,13 +2203,169 @@ func (x *RemoveResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *RemoveResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *RemoveResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *RemoveResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *RemoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *RemoveResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "RemoveResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(RemoveResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *RemoveResponse) GetBody() *RemoveResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *RemoveResponse) SetBody(v *RemoveResponse_Body) {
+ x.Body = v
+}
+func (x *RemoveResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *RemoveResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *RemoveResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *RemoveResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *RemoveResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *RemoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *RemoveResponse_Body
+ f = new(RemoveResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type MoveRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ ParentId uint64 `json:"parentId"`
+ NodeId uint64 `json:"nodeId"`
+ Meta []*KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*MoveRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*MoveRequest_Body)(nil)
+ _ json.Marshaler = (*MoveRequest_Body)(nil)
+ _ json.Unmarshaler = (*MoveRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -570,33 +2384,291 @@ func (x *MoveRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *MoveRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.ParentId)
- offset += proto.UInt64Marshal(4, buf[offset:], x.NodeId)
- for i := range x.Meta {
- offset += proto.NestedStructureMarshal(5, buf[offset:], x.Meta[i])
- }
- offset += proto.BytesMarshal(6, buf[offset:], x.BearerToken)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *MoveRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *MoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.ParentId != 0 {
+ mm.AppendUint64(3, x.ParentId)
+ }
+ if x.NodeId != 0 {
+ mm.AppendUint64(4, x.NodeId)
+ }
+ for i := range x.Meta {
+ if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
+ }
+ }
+ if len(x.BearerToken) != 0 {
+ mm.AppendBytes(6, x.BearerToken)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *MoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "MoveRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = data
+ case 4: // NodeId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NodeId")
+ }
+ x.NodeId = data
+ case 5: // Meta
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = append(x.Meta, new(KeyValue))
+ ff := x.Meta[len(x.Meta)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 6: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ }
+ }
+ return nil
+}
+func (x *MoveRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *MoveRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *MoveRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *MoveRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *MoveRequest_Body) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+func (x *MoveRequest_Body) SetParentId(v uint64) {
+ x.ParentId = v
+}
+func (x *MoveRequest_Body) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+func (x *MoveRequest_Body) SetNodeId(v uint64) {
+ x.NodeId = v
+}
+func (x *MoveRequest_Body) GetMeta() []*KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *MoveRequest_Body) SetMeta(v []*KeyValue) {
+ x.Meta = v
+}
+func (x *MoveRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *MoveRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *MoveRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *MoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
+ }
+ {
+ const prefix string = ",\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ const prefix string = ",\"parentId\":"
+ out.RawString(prefix)
+ out.Uint64(x.ParentId)
+ }
+ {
+ const prefix string = ",\"nodeId\":"
+ out.RawString(prefix)
+ out.Uint64(x.NodeId)
+ }
+ {
+ const prefix string = ",\"meta\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Meta {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Meta[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"bearerToken\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.BearerToken)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *MoveRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "parentId":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.ParentId = f
+ }
+ case "nodeId":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.NodeId = f
+ }
+ case "meta":
+ {
+ var f *KeyValue
+ var list []*KeyValue
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(KeyValue)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Meta = list
+ in.Delim(']')
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.BearerToken = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type MoveRequest struct {
+ Body *MoveRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*MoveRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*MoveRequest)(nil)
+ _ json.Marshaler = (*MoveRequest)(nil)
+ _ json.Unmarshaler = (*MoveRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -609,27 +2681,6 @@ func (x *MoveRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *MoveRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -645,13 +2696,163 @@ func (x *MoveRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *MoveRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *MoveRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *MoveRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *MoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *MoveRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "MoveRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(MoveRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *MoveRequest) GetBody() *MoveRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *MoveRequest) SetBody(v *MoveRequest_Body) {
+ x.Body = v
+}
+func (x *MoveRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *MoveRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *MoveRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *MoveRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *MoveRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *MoveRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *MoveRequest_Body
+ f = new(MoveRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type MoveResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*MoveResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*MoveResponse_Body)(nil)
+ _ json.Marshaler = (*MoveResponse_Body)(nil)
+ _ json.Unmarshaler = (*MoveResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -662,18 +2863,93 @@ func (x *MoveResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *MoveResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *MoveResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *MoveResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *MoveResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "MoveResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *MoveResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *MoveResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *MoveResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *MoveResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type MoveResponse struct {
+ Body *MoveResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*MoveResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*MoveResponse)(nil)
+ _ json.Marshaler = (*MoveResponse)(nil)
+ _ json.Unmarshaler = (*MoveResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -686,27 +2962,6 @@ func (x *MoveResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *MoveResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -722,13 +2977,171 @@ func (x *MoveResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *MoveResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *MoveResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *MoveResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *MoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *MoveResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "MoveResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(MoveResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *MoveResponse) GetBody() *MoveResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *MoveResponse) SetBody(v *MoveResponse_Body) {
+ x.Body = v
+}
+func (x *MoveResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *MoveResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *MoveResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *MoveResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *MoveResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *MoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *MoveResponse_Body
+ f = new(MoveResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNodeByPathRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ PathAttribute string `json:"pathAttribute"`
+ Path []string `json:"path"`
+ Attributes []string `json:"attributes"`
+ LatestOnly bool `json:"latestOnly"`
+ AllAttributes bool `json:"allAttributes"`
+ BearerToken []byte `json:"bearerToken"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNodeByPathRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNodeByPathRequest_Body)(nil)
+ _ json.Marshaler = (*GetNodeByPathRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetNodeByPathRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -747,33 +3160,356 @@ func (x *GetNodeByPathRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNodeByPathRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.StringMarshal(3, buf[offset:], x.PathAttribute)
- offset += proto.RepeatedStringMarshal(4, buf[offset:], x.Path)
- offset += proto.RepeatedStringMarshal(5, buf[offset:], x.Attributes)
- offset += proto.BoolMarshal(6, buf[offset:], x.LatestOnly)
- offset += proto.BoolMarshal(7, buf[offset:], x.AllAttributes)
- offset += proto.BytesMarshal(8, buf[offset:], x.BearerToken)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNodeByPathRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNodeByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if len(x.PathAttribute) != 0 {
+ mm.AppendString(3, x.PathAttribute)
+ }
+ for j := range x.Path {
+ mm.AppendString(4, x.Path[j])
+ }
+ for j := range x.Attributes {
+ mm.AppendString(5, x.Attributes[j])
+ }
+ if x.LatestOnly {
+ mm.AppendBool(6, x.LatestOnly)
+ }
+ if x.AllAttributes {
+ mm.AppendBool(7, x.AllAttributes)
+ }
+ if len(x.BearerToken) != 0 {
+ mm.AppendBytes(8, x.BearerToken)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNodeByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNodeByPathRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // PathAttribute
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "PathAttribute")
+ }
+ x.PathAttribute = data
+ case 4: // Path
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Path")
+ }
+ x.Path = append(x.Path, data)
+ case 5: // Attributes
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Attributes")
+ }
+ x.Attributes = append(x.Attributes, data)
+ case 6: // LatestOnly
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "LatestOnly")
+ }
+ x.LatestOnly = data
+ case 7: // AllAttributes
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "AllAttributes")
+ }
+ x.AllAttributes = data
+ case 8: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ }
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *GetNodeByPathRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *GetNodeByPathRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *GetNodeByPathRequest_Body) GetPathAttribute() string {
+ if x != nil {
+ return x.PathAttribute
+ }
+ return ""
+}
+func (x *GetNodeByPathRequest_Body) SetPathAttribute(v string) {
+ x.PathAttribute = v
+}
+func (x *GetNodeByPathRequest_Body) GetPath() []string {
+ if x != nil {
+ return x.Path
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest_Body) SetPath(v []string) {
+ x.Path = v
+}
+func (x *GetNodeByPathRequest_Body) GetAttributes() []string {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest_Body) SetAttributes(v []string) {
+ x.Attributes = v
+}
+func (x *GetNodeByPathRequest_Body) GetLatestOnly() bool {
+ if x != nil {
+ return x.LatestOnly
+ }
+ return false
+}
+func (x *GetNodeByPathRequest_Body) SetLatestOnly(v bool) {
+ x.LatestOnly = v
+}
+func (x *GetNodeByPathRequest_Body) GetAllAttributes() bool {
+ if x != nil {
+ return x.AllAttributes
+ }
+ return false
+}
+func (x *GetNodeByPathRequest_Body) SetAllAttributes(v bool) {
+ x.AllAttributes = v
+}
+func (x *GetNodeByPathRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNodeByPathRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
+ }
+ {
+ const prefix string = ",\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ const prefix string = ",\"pathAttribute\":"
+ out.RawString(prefix)
+ out.String(x.PathAttribute)
+ }
+ {
+ const prefix string = ",\"path\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Path {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Path[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"attributes\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Attributes {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Attributes[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"latestOnly\":"
+ out.RawString(prefix)
+ out.Bool(x.LatestOnly)
+ }
+ {
+ const prefix string = ",\"allAttributes\":"
+ out.RawString(prefix)
+ out.Bool(x.AllAttributes)
+ }
+ {
+ const prefix string = ",\"bearerToken\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.BearerToken)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNodeByPathRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNodeByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "pathAttribute":
+ {
+ var f string
+ f = in.String()
+ x.PathAttribute = f
+ }
+ case "path":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Path = list
+ in.Delim(']')
+ }
+ case "attributes":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Attributes = list
+ in.Delim(']')
+ }
+ case "latestOnly":
+ {
+ var f bool
+ f = in.Bool()
+ x.LatestOnly = f
+ }
+ case "allAttributes":
+ {
+ var f bool
+ f = in.Bool()
+ x.AllAttributes = f
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.BearerToken = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNodeByPathRequest struct {
+ Body *GetNodeByPathRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNodeByPathRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNodeByPathRequest)(nil)
+ _ json.Marshaler = (*GetNodeByPathRequest)(nil)
+ _ json.Unmarshaler = (*GetNodeByPathRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -786,27 +3522,6 @@ func (x *GetNodeByPathRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNodeByPathRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -822,13 +3537,167 @@ func (x *GetNodeByPathRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetNodeByPathRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetNodeByPathRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNodeByPathRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNodeByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNodeByPathRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNodeByPathRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetNodeByPathRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest) GetBody() *GetNodeByPathRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest) SetBody(v *GetNodeByPathRequest_Body) {
+ x.Body = v
+}
+func (x *GetNodeByPathRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetNodeByPathRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNodeByPathRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNodeByPathRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNodeByPathRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNodeByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetNodeByPathRequest_Body
+ f = new(GetNodeByPathRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNodeByPathResponse_Info struct {
+ NodeId uint64 `json:"nodeId"`
+ Timestamp uint64 `json:"timestamp"`
+ Meta []*KeyValue `json:"meta"`
+ ParentId uint64 `json:"parentId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNodeByPathResponse_Info)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse_Info)(nil)
+ _ json.Marshaler = (*GetNodeByPathResponse_Info)(nil)
+ _ json.Unmarshaler = (*GetNodeByPathResponse_Info)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -845,31 +3714,232 @@ func (x *GetNodeByPathResponse_Info) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNodeByPathResponse_Info) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt64Marshal(1, buf[offset:], x.NodeId)
- offset += proto.UInt64Marshal(2, buf[offset:], x.Timestamp)
- for i := range x.Meta {
- offset += proto.NestedStructureMarshal(3, buf[offset:], x.Meta[i])
- }
- offset += proto.UInt64Marshal(4, buf[offset:], x.ParentId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNodeByPathResponse_Info) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNodeByPathResponse_Info) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.NodeId != 0 {
+ mm.AppendUint64(1, x.NodeId)
+ }
+ if x.Timestamp != 0 {
+ mm.AppendUint64(2, x.Timestamp)
+ }
+ for i := range x.Meta {
+ if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(3))
+ }
+ }
+ if x.ParentId != 0 {
+ mm.AppendUint64(4, x.ParentId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNodeByPathResponse_Info) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse_Info")
+ }
+ switch fc.FieldNum {
+ case 1: // NodeId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NodeId")
+ }
+ x.NodeId = data
+ case 2: // Timestamp
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Timestamp")
+ }
+ x.Timestamp = data
+ case 3: // Meta
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = append(x.Meta, new(KeyValue))
+ ff := x.Meta[len(x.Meta)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 4: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = data
+ }
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse_Info) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+func (x *GetNodeByPathResponse_Info) SetNodeId(v uint64) {
+ x.NodeId = v
+}
+func (x *GetNodeByPathResponse_Info) GetTimestamp() uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+func (x *GetNodeByPathResponse_Info) SetTimestamp(v uint64) {
+ x.Timestamp = v
+}
+func (x *GetNodeByPathResponse_Info) GetMeta() []*KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse_Info) SetMeta(v []*KeyValue) {
+ x.Meta = v
+}
+func (x *GetNodeByPathResponse_Info) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+func (x *GetNodeByPathResponse_Info) SetParentId(v uint64) {
+ x.ParentId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNodeByPathResponse_Info) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNodeByPathResponse_Info) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"nodeId\":"
+ out.RawString(prefix[1:])
+ out.Uint64(x.NodeId)
+ }
+ {
+ const prefix string = ",\"timestamp\":"
+ out.RawString(prefix)
+ out.Uint64(x.Timestamp)
+ }
+ {
+ const prefix string = ",\"meta\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Meta {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Meta[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"parentId\":"
+ out.RawString(prefix)
+ out.Uint64(x.ParentId)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNodeByPathResponse_Info) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "nodeId":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.NodeId = f
+ }
+ case "timestamp":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.Timestamp = f
+ }
+ case "meta":
+ {
+ var f *KeyValue
+ var list []*KeyValue
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(KeyValue)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Meta = list
+ in.Delim(']')
+ }
+ case "parentId":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.ParentId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNodeByPathResponse_Body struct {
+ Nodes []*GetNodeByPathResponse_Info `json:"nodes"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNodeByPathResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse_Body)(nil)
+ _ json.Marshaler = (*GetNodeByPathResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetNodeByPathResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -883,28 +3953,146 @@ func (x *GetNodeByPathResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNodeByPathResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- for i := range x.Nodes {
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Nodes[i])
- }
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNodeByPathResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNodeByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for i := range x.Nodes {
+ if x.Nodes[i] != nil && x.Nodes[i].StableSize() != 0 {
+ x.Nodes[i].EmitProtobuf(mm.AppendMessage(1))
+ }
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNodeByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Nodes
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Nodes")
+ }
+ x.Nodes = append(x.Nodes, new(GetNodeByPathResponse_Info))
+ ff := x.Nodes[len(x.Nodes)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse_Body) GetNodes() []*GetNodeByPathResponse_Info {
+ if x != nil {
+ return x.Nodes
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse_Body) SetNodes(v []*GetNodeByPathResponse_Info) {
+ x.Nodes = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNodeByPathResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNodeByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"nodes\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Nodes {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Nodes[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNodeByPathResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNodeByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "nodes":
+ {
+ var f *GetNodeByPathResponse_Info
+ var list []*GetNodeByPathResponse_Info
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(GetNodeByPathResponse_Info)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Nodes = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetNodeByPathResponse struct {
+ Body *GetNodeByPathResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetNodeByPathResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetNodeByPathResponse)(nil)
+ _ json.Marshaler = (*GetNodeByPathResponse)(nil)
+ _ json.Unmarshaler = (*GetNodeByPathResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -917,27 +4105,6 @@ func (x *GetNodeByPathResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetNodeByPathResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -953,13 +4120,196 @@ func (x *GetNodeByPathResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetNodeByPathResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetNodeByPathResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetNodeByPathResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetNodeByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetNodeByPathResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetNodeByPathResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetNodeByPathResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse) GetBody() *GetNodeByPathResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse) SetBody(v *GetNodeByPathResponse_Body) {
+ x.Body = v
+}
+func (x *GetNodeByPathResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetNodeByPathResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetNodeByPathResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetNodeByPathResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetNodeByPathResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetNodeByPathResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetNodeByPathResponse_Body
+ f = new(GetNodeByPathResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetSubTreeRequest_Body_Order_Direction int32
+
+const (
+ GetSubTreeRequest_Body_Order_None GetSubTreeRequest_Body_Order_Direction = 0
+ GetSubTreeRequest_Body_Order_Asc GetSubTreeRequest_Body_Order_Direction = 1
+)
+
+var (
+ GetSubTreeRequest_Body_Order_Direction_name = map[int32]string{
+ 0: "None",
+ 1: "Asc",
+ }
+ GetSubTreeRequest_Body_Order_Direction_value = map[string]int32{
+ "None": 0,
+ "Asc": 1,
+ }
+)
+
+func (x GetSubTreeRequest_Body_Order_Direction) String() string {
+ if v, ok := GetSubTreeRequest_Body_Order_Direction_name[int32(x)]; ok {
+ return v
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+func (x *GetSubTreeRequest_Body_Order_Direction) FromString(s string) bool {
+ if v, ok := GetSubTreeRequest_Body_Order_Direction_value[s]; ok {
+ *x = GetSubTreeRequest_Body_Order_Direction(v)
+ return true
+ }
+ return false
+}
+
+type GetSubTreeRequest_Body_Order struct {
+ Direction GetSubTreeRequest_Body_Order_Direction `json:"direction"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetSubTreeRequest_Body_Order)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest_Body_Order)(nil)
+ _ json.Marshaler = (*GetSubTreeRequest_Body_Order)(nil)
+ _ json.Unmarshaler = (*GetSubTreeRequest_Body_Order)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -971,26 +4321,145 @@ func (x *GetSubTreeRequest_Body_Order) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetSubTreeRequest_Body_Order) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.EnumMarshal(1, buf[offset:], int32(x.Direction))
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetSubTreeRequest_Body_Order) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetSubTreeRequest_Body_Order) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if int32(x.Direction) != 0 {
+ mm.AppendInt32(1, int32(x.Direction))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetSubTreeRequest_Body_Order) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest_Body_Order")
+ }
+ switch fc.FieldNum {
+ case 1: // Direction
+ data, ok := fc.Int32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Direction")
+ }
+ x.Direction = GetSubTreeRequest_Body_Order_Direction(data)
+ }
+ }
+ return nil
+}
+func (x *GetSubTreeRequest_Body_Order) GetDirection() GetSubTreeRequest_Body_Order_Direction {
+ if x != nil {
+ return x.Direction
+ }
+ return 0
+}
+func (x *GetSubTreeRequest_Body_Order) SetDirection(v GetSubTreeRequest_Body_Order_Direction) {
+ x.Direction = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetSubTreeRequest_Body_Order) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetSubTreeRequest_Body_Order) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"direction\":"
+ out.RawString(prefix[1:])
+ out.Int32(int32(x.Direction))
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetSubTreeRequest_Body_Order) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetSubTreeRequest_Body_Order) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "direction":
+ {
+ var f GetSubTreeRequest_Body_Order_Direction
+ var parsedValue GetSubTreeRequest_Body_Order_Direction
+ switch v := in.Interface().(type) {
+ case string:
+ if vv, ok := GetSubTreeRequest_Body_Order_Direction_value[v]; ok {
+ parsedValue = GetSubTreeRequest_Body_Order_Direction(vv)
+ break
+ }
+ vv, err := strconv.ParseInt(v, 10, 32)
+ if err != nil {
+ in.AddError(err)
+ return
+ }
+ parsedValue = GetSubTreeRequest_Body_Order_Direction(vv)
+ case float64:
+ parsedValue = GetSubTreeRequest_Body_Order_Direction(v)
+ }
+ f = parsedValue
+ x.Direction = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetSubTreeRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ RootId []uint64 `json:"rootId"`
+ Depth uint32 `json:"depth"`
+ BearerToken []byte `json:"bearerToken"`
+ OrderBy *GetSubTreeRequest_Body_Order `json:"orderBy"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetSubTreeRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest_Body)(nil)
+ _ json.Marshaler = (*GetSubTreeRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetSubTreeRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1009,36 +4478,287 @@ func (x *GetSubTreeRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetSubTreeRequest_Body) StableMarshal(buf []byte) []byte {
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetSubTreeRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetSubTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
- return []byte{}
+ return
}
- if buf == nil {
- buf = make([]byte, x.StableSize())
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
}
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- for i := range x.RootId {
- {
- prefix := protowire.EncodeTag(protowire.Number(3), protowire.VarintType)
- offset += binary.PutUvarint(buf[offset:], uint64(prefix))
- offset += binary.PutUvarint(buf[offset:], x.RootId[i])
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ for j := range x.RootId {
+ mm.AppendUint64(3, x.RootId[j])
+ }
+ if x.Depth != 0 {
+ mm.AppendUint32(4, x.Depth)
+ }
+ if len(x.BearerToken) != 0 {
+ mm.AppendBytes(5, x.BearerToken)
+ }
+ if x.OrderBy != nil && x.OrderBy.StableSize() != 0 {
+ x.OrderBy.EmitProtobuf(mm.AppendMessage(6))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetSubTreeRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // RootId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "RootId")
+ }
+ x.RootId = append(x.RootId, data)
+ case 4: // Depth
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Depth")
+ }
+ x.Depth = data
+ case 5: // BearerToken
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "BearerToken")
+ }
+ x.BearerToken = data
+ case 6: // OrderBy
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "OrderBy")
+ }
+ x.OrderBy = new(GetSubTreeRequest_Body_Order)
+ if err := x.OrderBy.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
}
}
- offset += proto.UInt32Marshal(4, buf[offset:], x.Depth)
- offset += proto.BytesMarshal(5, buf[offset:], x.BearerToken)
- offset += proto.NestedStructureMarshal(6, buf[offset:], x.OrderBy)
- return buf
+ return nil
}
+func (x *GetSubTreeRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *GetSubTreeRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *GetSubTreeRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *GetSubTreeRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *GetSubTreeRequest_Body) GetRootId() []uint64 {
+ if x != nil {
+ return x.RootId
+ }
+ return nil
+}
+func (x *GetSubTreeRequest_Body) SetRootId(v []uint64) {
+ x.RootId = v
+}
+func (x *GetSubTreeRequest_Body) GetDepth() uint32 {
+ if x != nil {
+ return x.Depth
+ }
+ return 0
+}
+func (x *GetSubTreeRequest_Body) SetDepth(v uint32) {
+ x.Depth = v
+}
+func (x *GetSubTreeRequest_Body) GetBearerToken() []byte {
+ if x != nil {
+ return x.BearerToken
+ }
+ return nil
+}
+func (x *GetSubTreeRequest_Body) SetBearerToken(v []byte) {
+ x.BearerToken = v
+}
+func (x *GetSubTreeRequest_Body) GetOrderBy() *GetSubTreeRequest_Body_Order {
+ if x != nil {
+ return x.OrderBy
+ }
+ return nil
+}
+func (x *GetSubTreeRequest_Body) SetOrderBy(v *GetSubTreeRequest_Body_Order) {
+ x.OrderBy = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetSubTreeRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetSubTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
+ }
+ {
+ const prefix string = ",\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ const prefix string = ",\"rootId\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.RootId {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Uint64(x.RootId[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"depth\":"
+ out.RawString(prefix)
+ out.Uint32(x.Depth)
+ }
+ {
+ const prefix string = ",\"bearerToken\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.BearerToken)
+ }
+ {
+ const prefix string = ",\"orderBy\":"
+ out.RawString(prefix)
+ x.OrderBy.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetSubTreeRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "rootId":
+ {
+ var f uint64
+ var list []uint64
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Uint64()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.RootId = list
+ in.Delim(']')
+ }
+ case "depth":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.Depth = f
+ }
+ case "bearerToken":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.BearerToken = f
+ }
+ case "orderBy":
+ {
+ var f *GetSubTreeRequest_Body_Order
+ f = new(GetSubTreeRequest_Body_Order)
+ f.UnmarshalEasyJSON(in)
+ x.OrderBy = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetSubTreeRequest struct {
+ Body *GetSubTreeRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetSubTreeRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetSubTreeRequest)(nil)
+ _ json.Marshaler = (*GetSubTreeRequest)(nil)
+ _ json.Unmarshaler = (*GetSubTreeRequest)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
@@ -1052,27 +4772,6 @@ func (x *GetSubTreeRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetSubTreeRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1088,13 +4787,167 @@ func (x *GetSubTreeRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetSubTreeRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetSubTreeRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetSubTreeRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetSubTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetSubTreeRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetSubTreeRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetSubTreeRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetSubTreeRequest) GetBody() *GetSubTreeRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetSubTreeRequest) SetBody(v *GetSubTreeRequest_Body) {
+ x.Body = v
+}
+func (x *GetSubTreeRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetSubTreeRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetSubTreeRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetSubTreeRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetSubTreeRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetSubTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetSubTreeRequest_Body
+ f = new(GetSubTreeRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetSubTreeResponse_Body struct {
+ NodeId []uint64 `json:"nodeId"`
+ ParentId []uint64 `json:"parentId"`
+ Timestamp []uint64 `json:"timestamp"`
+ Meta []*KeyValue `json:"meta"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetSubTreeResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetSubTreeResponse_Body)(nil)
+ _ json.Marshaler = (*GetSubTreeResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetSubTreeResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1117,49 +4970,275 @@ func (x *GetSubTreeResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetSubTreeResponse_Body) StableMarshal(buf []byte) []byte {
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetSubTreeResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *GetSubTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
- return []byte{}
+ return
}
- if buf == nil {
- buf = make([]byte, x.StableSize())
+ for j := range x.NodeId {
+ mm.AppendUint64(1, x.NodeId[j])
}
- var offset int
- for i := range x.NodeId {
- {
- prefix := protowire.EncodeTag(protowire.Number(1), protowire.VarintType)
- offset += binary.PutUvarint(buf[offset:], uint64(prefix))
- offset += binary.PutUvarint(buf[offset:], x.NodeId[i])
- }
+ for j := range x.ParentId {
+ mm.AppendUint64(2, x.ParentId[j])
}
- for i := range x.ParentId {
- {
- prefix := protowire.EncodeTag(protowire.Number(2), protowire.VarintType)
- offset += binary.PutUvarint(buf[offset:], uint64(prefix))
- offset += binary.PutUvarint(buf[offset:], x.ParentId[i])
- }
- }
- for i := range x.Timestamp {
- {
- prefix := protowire.EncodeTag(protowire.Number(3), protowire.VarintType)
- offset += binary.PutUvarint(buf[offset:], uint64(prefix))
- offset += binary.PutUvarint(buf[offset:], x.Timestamp[i])
- }
+ for j := range x.Timestamp {
+ mm.AppendUint64(3, x.Timestamp[j])
}
for i := range x.Meta {
- offset += proto.NestedStructureMarshal(4, buf[offset:], x.Meta[i])
+ if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
+ }
}
- return buf
}
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetSubTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetSubTreeResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // NodeId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "NodeId")
+ }
+ x.NodeId = append(x.NodeId, data)
+ case 2: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = append(x.ParentId, data)
+ case 3: // Timestamp
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Timestamp")
+ }
+ x.Timestamp = append(x.Timestamp, data)
+ case 4: // Meta
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = append(x.Meta, new(KeyValue))
+ ff := x.Meta[len(x.Meta)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetSubTreeResponse_Body) GetNodeId() []uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return nil
+}
+func (x *GetSubTreeResponse_Body) SetNodeId(v []uint64) {
+ x.NodeId = v
+}
+func (x *GetSubTreeResponse_Body) GetParentId() []uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return nil
+}
+func (x *GetSubTreeResponse_Body) SetParentId(v []uint64) {
+ x.ParentId = v
+}
+func (x *GetSubTreeResponse_Body) GetTimestamp() []uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return nil
+}
+func (x *GetSubTreeResponse_Body) SetTimestamp(v []uint64) {
+ x.Timestamp = v
+}
+func (x *GetSubTreeResponse_Body) GetMeta() []*KeyValue {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *GetSubTreeResponse_Body) SetMeta(v []*KeyValue) {
+ x.Meta = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetSubTreeResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetSubTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"nodeId\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.NodeId {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Uint64(x.NodeId[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"parentId\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.ParentId {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Uint64(x.ParentId[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"timestamp\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Timestamp {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Uint64(x.Timestamp[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"meta\":"
+ out.RawString(prefix)
+ out.RawByte('[')
+ for i := range x.Meta {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Meta[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetSubTreeResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "nodeId":
+ {
+ var f uint64
+ var list []uint64
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Uint64()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.NodeId = list
+ in.Delim(']')
+ }
+ case "parentId":
+ {
+ var f uint64
+ var list []uint64
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Uint64()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.ParentId = list
+ in.Delim(']')
+ }
+ case "timestamp":
+ {
+ var f uint64
+ var list []uint64
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Uint64()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Timestamp = list
+ in.Delim(']')
+ }
+ case "meta":
+ {
+ var f *KeyValue
+ var list []*KeyValue
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = new(KeyValue)
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Meta = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetSubTreeResponse struct {
+ Body *GetSubTreeResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetSubTreeResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetSubTreeResponse)(nil)
+ _ json.Marshaler = (*GetSubTreeResponse)(nil)
+ _ json.Unmarshaler = (*GetSubTreeResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1172,27 +5251,6 @@ func (x *GetSubTreeResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetSubTreeResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1208,13 +5266,164 @@ func (x *GetSubTreeResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetSubTreeResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetSubTreeResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetSubTreeResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetSubTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetSubTreeResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetSubTreeResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetSubTreeResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetSubTreeResponse) GetBody() *GetSubTreeResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetSubTreeResponse) SetBody(v *GetSubTreeResponse_Body) {
+ x.Body = v
+}
+func (x *GetSubTreeResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetSubTreeResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetSubTreeResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetSubTreeResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetSubTreeResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetSubTreeResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetSubTreeResponse_Body
+ f = new(GetSubTreeResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TreeListRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TreeListRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*TreeListRequest_Body)(nil)
+ _ json.Marshaler = (*TreeListRequest_Body)(nil)
+ _ json.Unmarshaler = (*TreeListRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1226,26 +5435,125 @@ func (x *TreeListRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TreeListRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TreeListRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TreeListRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TreeListRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TreeListRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ }
+ }
+ return nil
+}
+func (x *TreeListRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *TreeListRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TreeListRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TreeListRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TreeListRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TreeListRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ContainerId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TreeListRequest struct {
+ Body *TreeListRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TreeListRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*TreeListRequest)(nil)
+ _ json.Marshaler = (*TreeListRequest)(nil)
+ _ json.Unmarshaler = (*TreeListRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1258,27 +5566,6 @@ func (x *TreeListRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TreeListRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1294,13 +5581,164 @@ func (x *TreeListRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *TreeListRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *TreeListRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TreeListRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TreeListRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TreeListRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TreeListRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(TreeListRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *TreeListRequest) GetBody() *TreeListRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *TreeListRequest) SetBody(v *TreeListRequest_Body) {
+ x.Body = v
+}
+func (x *TreeListRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *TreeListRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TreeListRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TreeListRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TreeListRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TreeListRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *TreeListRequest_Body
+ f = new(TreeListRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TreeListResponse_Body struct {
+ Ids []string `json:"ids"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TreeListResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*TreeListResponse_Body)(nil)
+ _ json.Marshaler = (*TreeListResponse_Body)(nil)
+ _ json.Unmarshaler = (*TreeListResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1312,26 +5750,139 @@ func (x *TreeListResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TreeListResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.RepeatedStringMarshal(1, buf[offset:], x.Ids)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TreeListResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TreeListResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Ids {
+ mm.AppendString(1, x.Ids[j])
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TreeListResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TreeListResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Ids
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Ids")
+ }
+ x.Ids = append(x.Ids, data)
+ }
+ }
+ return nil
+}
+func (x *TreeListResponse_Body) GetIds() []string {
+ if x != nil {
+ return x.Ids
+ }
+ return nil
+}
+func (x *TreeListResponse_Body) SetIds(v []string) {
+ x.Ids = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TreeListResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TreeListResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"ids\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Ids {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.String(x.Ids[i])
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TreeListResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TreeListResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "ids":
+ {
+ var f string
+ var list []string
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.String()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Ids = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type TreeListResponse struct {
+ Body *TreeListResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*TreeListResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*TreeListResponse)(nil)
+ _ json.Marshaler = (*TreeListResponse)(nil)
+ _ json.Unmarshaler = (*TreeListResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1344,27 +5895,6 @@ func (x *TreeListResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *TreeListResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1380,13 +5910,166 @@ func (x *TreeListResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *TreeListResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *TreeListResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *TreeListResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *TreeListResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *TreeListResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "TreeListResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(TreeListResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *TreeListResponse) GetBody() *TreeListResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *TreeListResponse) SetBody(v *TreeListResponse_Body) {
+ x.Body = v
+}
+func (x *TreeListResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *TreeListResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *TreeListResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *TreeListResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *TreeListResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *TreeListResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *TreeListResponse_Body
+ f = new(TreeListResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ApplyRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ Operation *LogMove `json:"operation"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ApplyRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ApplyRequest_Body)(nil)
+ _ json.Marshaler = (*ApplyRequest_Body)(nil)
+ _ json.Unmarshaler = (*ApplyRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1400,28 +6083,187 @@ func (x *ApplyRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ApplyRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.NestedStructureMarshal(3, buf[offset:], x.Operation)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ApplyRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ApplyRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.Operation != nil && x.Operation.StableSize() != 0 {
+ x.Operation.EmitProtobuf(mm.AppendMessage(3))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ApplyRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ApplyRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // Operation
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Operation")
+ }
+ x.Operation = new(LogMove)
+ if err := x.Operation.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ApplyRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *ApplyRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *ApplyRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *ApplyRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *ApplyRequest_Body) GetOperation() *LogMove {
+ if x != nil {
+ return x.Operation
+ }
+ return nil
+}
+func (x *ApplyRequest_Body) SetOperation(v *LogMove) {
+ x.Operation = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ApplyRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ApplyRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
+ }
+ {
+ const prefix string = ",\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ const prefix string = ",\"operation\":"
+ out.RawString(prefix)
+ x.Operation.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ApplyRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ApplyRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "operation":
+ {
+ var f *LogMove
+ f = new(LogMove)
+ f.UnmarshalEasyJSON(in)
+ x.Operation = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ApplyRequest struct {
+ Body *ApplyRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ApplyRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*ApplyRequest)(nil)
+ _ json.Marshaler = (*ApplyRequest)(nil)
+ _ json.Unmarshaler = (*ApplyRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1434,27 +6276,6 @@ func (x *ApplyRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ApplyRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1470,13 +6291,163 @@ func (x *ApplyRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ApplyRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ApplyRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ApplyRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ApplyRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ApplyRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ApplyRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ApplyRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ApplyRequest) GetBody() *ApplyRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ApplyRequest) SetBody(v *ApplyRequest_Body) {
+ x.Body = v
+}
+func (x *ApplyRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ApplyRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ApplyRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ApplyRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ApplyRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ApplyRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ApplyRequest_Body
+ f = new(ApplyRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ApplyResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ApplyResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*ApplyResponse_Body)(nil)
+ _ json.Marshaler = (*ApplyResponse_Body)(nil)
+ _ json.Unmarshaler = (*ApplyResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1487,18 +6458,93 @@ func (x *ApplyResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ApplyResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ApplyResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ApplyResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ApplyResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ApplyResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ApplyResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ApplyResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ApplyResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ApplyResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type ApplyResponse struct {
+ Body *ApplyResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*ApplyResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*ApplyResponse)(nil)
+ _ json.Marshaler = (*ApplyResponse)(nil)
+ _ json.Unmarshaler = (*ApplyResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1511,27 +6557,6 @@ func (x *ApplyResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *ApplyResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1547,13 +6572,167 @@ func (x *ApplyResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *ApplyResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *ApplyResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *ApplyResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *ApplyResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *ApplyResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "ApplyResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(ApplyResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *ApplyResponse) GetBody() *ApplyResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *ApplyResponse) SetBody(v *ApplyResponse_Body) {
+ x.Body = v
+}
+func (x *ApplyResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *ApplyResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *ApplyResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *ApplyResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *ApplyResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *ApplyResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *ApplyResponse_Body
+ f = new(ApplyResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetOpLogRequest_Body struct {
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ Height uint64 `json:"height"`
+ Count uint64 `json:"count"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetOpLogRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetOpLogRequest_Body)(nil)
+ _ json.Marshaler = (*GetOpLogRequest_Body)(nil)
+ _ json.Unmarshaler = (*GetOpLogRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1568,29 +6747,212 @@ func (x *GetOpLogRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetOpLogRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.ContainerId)
- offset += proto.StringMarshal(2, buf[offset:], x.TreeId)
- offset += proto.UInt64Marshal(3, buf[offset:], x.Height)
- offset += proto.UInt64Marshal(4, buf[offset:], x.Count)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetOpLogRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetOpLogRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.ContainerId) != 0 {
+ mm.AppendBytes(1, x.ContainerId)
+ }
+ if len(x.TreeId) != 0 {
+ mm.AppendString(2, x.TreeId)
+ }
+ if x.Height != 0 {
+ mm.AppendUint64(3, x.Height)
+ }
+ if x.Count != 0 {
+ mm.AppendUint64(4, x.Count)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetOpLogRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetOpLogRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // ContainerId
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerId")
+ }
+ x.ContainerId = data
+ case 2: // TreeId
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TreeId")
+ }
+ x.TreeId = data
+ case 3: // Height
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Height")
+ }
+ x.Height = data
+ case 4: // Count
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Count")
+ }
+ x.Count = data
+ }
+ }
+ return nil
+}
+func (x *GetOpLogRequest_Body) GetContainerId() []byte {
+ if x != nil {
+ return x.ContainerId
+ }
+ return nil
+}
+func (x *GetOpLogRequest_Body) SetContainerId(v []byte) {
+ x.ContainerId = v
+}
+func (x *GetOpLogRequest_Body) GetTreeId() string {
+ if x != nil {
+ return x.TreeId
+ }
+ return ""
+}
+func (x *GetOpLogRequest_Body) SetTreeId(v string) {
+ x.TreeId = v
+}
+func (x *GetOpLogRequest_Body) GetHeight() uint64 {
+ if x != nil {
+ return x.Height
+ }
+ return 0
+}
+func (x *GetOpLogRequest_Body) SetHeight(v uint64) {
+ x.Height = v
+}
+func (x *GetOpLogRequest_Body) GetCount() uint64 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+func (x *GetOpLogRequest_Body) SetCount(v uint64) {
+ x.Count = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetOpLogRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetOpLogRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"containerId\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.ContainerId)
+ }
+ {
+ const prefix string = ",\"treeId\":"
+ out.RawString(prefix)
+ out.String(x.TreeId)
+ }
+ {
+ const prefix string = ",\"height\":"
+ out.RawString(prefix)
+ out.Uint64(x.Height)
+ }
+ {
+ const prefix string = ",\"count\":"
+ out.RawString(prefix)
+ out.Uint64(x.Count)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetOpLogRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetOpLogRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "containerId":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.ContainerId = f
+ }
+ case "treeId":
+ {
+ var f string
+ f = in.String()
+ x.TreeId = f
+ }
+ case "height":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.Height = f
+ }
+ case "count":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.Count = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetOpLogRequest struct {
+ Body *GetOpLogRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetOpLogRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetOpLogRequest)(nil)
+ _ json.Marshaler = (*GetOpLogRequest)(nil)
+ _ json.Unmarshaler = (*GetOpLogRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1603,27 +6965,6 @@ func (x *GetOpLogRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetOpLogRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1639,13 +6980,164 @@ func (x *GetOpLogRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetOpLogRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetOpLogRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetOpLogRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetOpLogRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetOpLogRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetOpLogRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetOpLogRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetOpLogRequest) GetBody() *GetOpLogRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetOpLogRequest) SetBody(v *GetOpLogRequest_Body) {
+ x.Body = v
+}
+func (x *GetOpLogRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetOpLogRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetOpLogRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetOpLogRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetOpLogRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetOpLogRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetOpLogRequest_Body
+ f = new(GetOpLogRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetOpLogResponse_Body struct {
+ Operation *LogMove `json:"operation"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetOpLogResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetOpLogResponse_Body)(nil)
+ _ json.Marshaler = (*GetOpLogResponse_Body)(nil)
+ _ json.Unmarshaler = (*GetOpLogResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1657,26 +7149,129 @@ func (x *GetOpLogResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetOpLogResponse_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Operation)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetOpLogResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetOpLogResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Operation != nil && x.Operation.StableSize() != 0 {
+ x.Operation.EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetOpLogResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetOpLogResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Operation
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Operation")
+ }
+ x.Operation = new(LogMove)
+ if err := x.Operation.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetOpLogResponse_Body) GetOperation() *LogMove {
+ if x != nil {
+ return x.Operation
+ }
+ return nil
+}
+func (x *GetOpLogResponse_Body) SetOperation(v *LogMove) {
+ x.Operation = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetOpLogResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetOpLogResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"operation\":"
+ out.RawString(prefix[1:])
+ x.Operation.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetOpLogResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetOpLogResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "operation":
+ {
+ var f *LogMove
+ f = new(LogMove)
+ f.UnmarshalEasyJSON(in)
+ x.Operation = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type GetOpLogResponse struct {
+ Body *GetOpLogResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*GetOpLogResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*GetOpLogResponse)(nil)
+ _ json.Marshaler = (*GetOpLogResponse)(nil)
+ _ json.Unmarshaler = (*GetOpLogResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1689,27 +7284,6 @@ func (x *GetOpLogResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *GetOpLogResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1725,13 +7299,163 @@ func (x *GetOpLogResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *GetOpLogResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *GetOpLogResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *GetOpLogResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *GetOpLogResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *GetOpLogResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "GetOpLogResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(GetOpLogResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *GetOpLogResponse) GetBody() *GetOpLogResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *GetOpLogResponse) SetBody(v *GetOpLogResponse_Body) {
+ x.Body = v
+}
+func (x *GetOpLogResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *GetOpLogResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *GetOpLogResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *GetOpLogResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *GetOpLogResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *GetOpLogResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *GetOpLogResponse_Body
+ f = new(GetOpLogResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthcheckResponse_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthcheckResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthcheckResponse_Body)(nil)
+ _ json.Marshaler = (*HealthcheckResponse_Body)(nil)
+ _ json.Unmarshaler = (*HealthcheckResponse_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1742,18 +7466,93 @@ func (x *HealthcheckResponse_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthcheckResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthcheckResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthcheckResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthcheckResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthcheckResponse_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthcheckResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthcheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthcheckResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthcheckResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthcheckResponse struct {
+ Body *HealthcheckResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthcheckResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthcheckResponse)(nil)
+ _ json.Marshaler = (*HealthcheckResponse)(nil)
+ _ json.Unmarshaler = (*HealthcheckResponse)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1766,27 +7565,6 @@ func (x *HealthcheckResponse) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthcheckResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1802,13 +7580,163 @@ func (x *HealthcheckResponse) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthcheckResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthcheckResponse) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthcheckResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthcheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthcheckResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthcheckResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthcheckResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthcheckResponse) GetBody() *HealthcheckResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthcheckResponse) SetBody(v *HealthcheckResponse_Body) {
+ x.Body = v
+}
+func (x *HealthcheckResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthcheckResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthcheckResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthcheckResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthcheckResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthcheckResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthcheckResponse_Body
+ f = new(HealthcheckResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthcheckRequest_Body struct {
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthcheckRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthcheckRequest_Body)(nil)
+ _ json.Marshaler = (*HealthcheckRequest_Body)(nil)
+ _ json.Unmarshaler = (*HealthcheckRequest_Body)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1819,18 +7747,93 @@ func (x *HealthcheckRequest_Body) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthcheckRequest_Body) StableMarshal(buf []byte) []byte {
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthcheckRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *HealthcheckRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthcheckRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthcheckRequest_Body")
+ }
+ switch fc.FieldNum {
+ }
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthcheckRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthcheckRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthcheckRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthcheckRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type HealthcheckRequest struct {
+ Body *HealthcheckRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*HealthcheckRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*HealthcheckRequest)(nil)
+ _ json.Marshaler = (*HealthcheckRequest)(nil)
+ _ json.Unmarshaler = (*HealthcheckRequest)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -1843,27 +7846,6 @@ func (x *HealthcheckRequest) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *HealthcheckRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
// ReadSignedData fills buf with signed data of x.
// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
//
@@ -1879,9 +7861,149 @@ func (x *HealthcheckRequest) SignedDataSize() int {
//
// Structures with the same field values have the same signed data size.
func (x *HealthcheckRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
+ return x.GetBody().MarshalProtobuf(buf), nil
}
-func (x *HealthcheckRequest) SetSignature(sig *Signature) {
- x.Signature = sig
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *HealthcheckRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *HealthcheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil && x.Body.StableSize() != 0 {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil && x.Signature.StableSize() != 0 {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *HealthcheckRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "HealthcheckRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(HealthcheckRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *HealthcheckRequest) GetBody() *HealthcheckRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *HealthcheckRequest) SetBody(v *HealthcheckRequest_Body) {
+ x.Body = v
+}
+func (x *HealthcheckRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *HealthcheckRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *HealthcheckRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *HealthcheckRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *HealthcheckRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *HealthcheckRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *HealthcheckRequest_Body
+ f = new(HealthcheckRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
diff --git a/pkg/services/tree/types.pb.go b/pkg/services/tree/types.pb.go
deleted file mode 100644
index 6464ccb77..000000000
--- a/pkg/services/tree/types.pb.go
+++ /dev/null
@@ -1,320 +0,0 @@
-//*
-// Auxiliary structures to use with tree service.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.0
-// source: pkg/services/tree/types.proto
-
-package tree
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// KeyValue represents key-value pair attached to an object.
-type KeyValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Attribute name.
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // Attribute value.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (x *KeyValue) Reset() {
- *x = KeyValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_types_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *KeyValue) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*KeyValue) ProtoMessage() {}
-
-func (x *KeyValue) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_types_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead.
-func (*KeyValue) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *KeyValue) GetKey() string {
- if x != nil {
- return x.Key
- }
- return ""
-}
-
-func (x *KeyValue) GetValue() []byte {
- if x != nil {
- return x.Value
- }
- return nil
-}
-
-// LogMove represents log-entry for a single move operation.
-type LogMove struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the parent node.
- ParentId uint64 `protobuf:"varint,1,opt,name=parent_id,json=parentID,proto3" json:"parent_id,omitempty"`
- // Node meta information, including operation timestamp.
- Meta []byte `protobuf:"bytes,2,opt,name=meta,proto3" json:"meta,omitempty"`
- // ID of the node to move.
- ChildId uint64 `protobuf:"varint,3,opt,name=child_id,json=childID,proto3" json:"child_id,omitempty"`
-}
-
-func (x *LogMove) Reset() {
- *x = LogMove{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_types_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *LogMove) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*LogMove) ProtoMessage() {}
-
-func (x *LogMove) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_types_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use LogMove.ProtoReflect.Descriptor instead.
-func (*LogMove) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *LogMove) GetParentId() uint64 {
- if x != nil {
- return x.ParentId
- }
- return 0
-}
-
-func (x *LogMove) GetMeta() []byte {
- if x != nil {
- return x.Meta
- }
- return nil
-}
-
-func (x *LogMove) GetChildId() uint64 {
- if x != nil {
- return x.ChildId
- }
- return 0
-}
-
-// Signature of a message.
-type Signature struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Serialized public key as defined in FrostFS API.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // Signature of a message body.
- Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"`
-}
-
-func (x *Signature) Reset() {
- *x = Signature{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_tree_types_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Signature) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Signature) ProtoMessage() {}
-
-func (x *Signature) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_tree_types_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
-func (*Signature) Descriptor() ([]byte, []int) {
- return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Signature) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-
-func (x *Signature) GetSign() []byte {
- if x != nil {
- return x.Sign
- }
- return nil
-}
-
-var File_pkg_services_tree_types_proto protoreflect.FileDescriptor
-
-var file_pkg_services_tree_types_proto_rawDesc = []byte{
- 0x0a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74,
- 0x72, 0x65, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
- 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x07, 0x4c, 0x6f, 0x67,
- 0x4d, 0x6f, 0x76, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49,
- 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x69,
- 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x49, 0x44,
- 0x22, 0x36, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x17, 0x0a, 0x04, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x2e,
- 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75,
- 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66,
- 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_pkg_services_tree_types_proto_rawDescOnce sync.Once
- file_pkg_services_tree_types_proto_rawDescData = file_pkg_services_tree_types_proto_rawDesc
-)
-
-func file_pkg_services_tree_types_proto_rawDescGZIP() []byte {
- file_pkg_services_tree_types_proto_rawDescOnce.Do(func() {
- file_pkg_services_tree_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_tree_types_proto_rawDescData)
- })
- return file_pkg_services_tree_types_proto_rawDescData
-}
-
-var file_pkg_services_tree_types_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_pkg_services_tree_types_proto_goTypes = []interface{}{
- (*KeyValue)(nil), // 0: tree.KeyValue
- (*LogMove)(nil), // 1: tree.LogMove
- (*Signature)(nil), // 2: tree.Signature
-}
-var file_pkg_services_tree_types_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_pkg_services_tree_types_proto_init() }
-func file_pkg_services_tree_types_proto_init() {
- if File_pkg_services_tree_types_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_pkg_services_tree_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KeyValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LogMove); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_tree_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Signature); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_pkg_services_tree_types_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 3,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_pkg_services_tree_types_proto_goTypes,
- DependencyIndexes: file_pkg_services_tree_types_proto_depIdxs,
- MessageInfos: file_pkg_services_tree_types_proto_msgTypes,
- }.Build()
- File_pkg_services_tree_types_proto = out.File
- file_pkg_services_tree_types_proto_rawDesc = nil
- file_pkg_services_tree_types_proto_goTypes = nil
- file_pkg_services_tree_types_proto_depIdxs = nil
-}
diff --git a/pkg/services/tree/types_frostfs.pb.go b/pkg/services/tree/types_frostfs.pb.go
index 707fcc3cc..4399f8a8b 100644
--- a/pkg/services/tree/types_frostfs.pb.go
+++ b/pkg/services/tree/types_frostfs.pb.go
@@ -2,7 +2,28 @@
package tree
-import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+import (
+ json "encoding/json"
+ fmt "fmt"
+ pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
+ proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto"
+ encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding"
+ easyproto "github.com/VictoriaMetrics/easyproto"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+)
+
+type KeyValue struct {
+ Key string `json:"key"`
+ Value []byte `json:"value"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*KeyValue)(nil)
+ _ encoding.ProtoUnmarshaler = (*KeyValue)(nil)
+ _ json.Marshaler = (*KeyValue)(nil)
+ _ json.Unmarshaler = (*KeyValue)(nil)
+)
// StableSize returns the size of x in protobuf format.
//
@@ -16,27 +37,155 @@ func (x *KeyValue) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *KeyValue) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.StringMarshal(1, buf[offset:], x.Key)
- offset += proto.BytesMarshal(2, buf[offset:], x.Value)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *KeyValue) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *KeyValue) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendString(1, x.Key)
+ }
+ if len(x.Value) != 0 {
+ mm.AppendBytes(2, x.Value)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *KeyValue) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "KeyValue")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Value
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Value")
+ }
+ x.Value = data
+ }
+ }
+ return nil
+}
+func (x *KeyValue) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+func (x *KeyValue) SetKey(v string) {
+ x.Key = v
+}
+func (x *KeyValue) GetValue() []byte {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+func (x *KeyValue) SetValue(v []byte) {
+ x.Value = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *KeyValue) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *KeyValue) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
+ out.String(x.Key)
+ }
+ {
+ const prefix string = ",\"value\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.Value)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *KeyValue) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *KeyValue) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f string
+ f = in.String()
+ x.Key = f
+ }
+ case "value":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Value = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type LogMove struct {
+ ParentId uint64 `json:"parentID"`
+ Meta []byte `json:"meta"`
+ ChildId uint64 `json:"childID"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*LogMove)(nil)
+ _ encoding.ProtoUnmarshaler = (*LogMove)(nil)
+ _ json.Marshaler = (*LogMove)(nil)
+ _ json.Unmarshaler = (*LogMove)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -50,28 +199,183 @@ func (x *LogMove) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *LogMove) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.UInt64Marshal(1, buf[offset:], x.ParentId)
- offset += proto.BytesMarshal(2, buf[offset:], x.Meta)
- offset += proto.UInt64Marshal(3, buf[offset:], x.ChildId)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *LogMove) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
}
+func (x *LogMove) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.ParentId != 0 {
+ mm.AppendUint64(1, x.ParentId)
+ }
+ if len(x.Meta) != 0 {
+ mm.AppendBytes(2, x.Meta)
+ }
+ if x.ChildId != 0 {
+ mm.AppendUint64(3, x.ChildId)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *LogMove) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "LogMove")
+ }
+ switch fc.FieldNum {
+ case 1: // ParentId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ParentId")
+ }
+ x.ParentId = data
+ case 2: // Meta
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Meta")
+ }
+ x.Meta = data
+ case 3: // ChildId
+ data, ok := fc.Uint64()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ChildId")
+ }
+ x.ChildId = data
+ }
+ }
+ return nil
+}
+func (x *LogMove) GetParentId() uint64 {
+ if x != nil {
+ return x.ParentId
+ }
+ return 0
+}
+func (x *LogMove) SetParentId(v uint64) {
+ x.ParentId = v
+}
+func (x *LogMove) GetMeta() []byte {
+ if x != nil {
+ return x.Meta
+ }
+ return nil
+}
+func (x *LogMove) SetMeta(v []byte) {
+ x.Meta = v
+}
+func (x *LogMove) GetChildId() uint64 {
+ if x != nil {
+ return x.ChildId
+ }
+ return 0
+}
+func (x *LogMove) SetChildId(v uint64) {
+ x.ChildId = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *LogMove) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *LogMove) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"parentID\":"
+ out.RawString(prefix[1:])
+ out.Uint64(x.ParentId)
+ }
+ {
+ const prefix string = ",\"meta\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.Meta)
+ }
+ {
+ const prefix string = ",\"childID\":"
+ out.RawString(prefix)
+ out.Uint64(x.ChildId)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *LogMove) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *LogMove) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "parentID":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.ParentId = f
+ }
+ case "meta":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Meta = f
+ }
+ case "childID":
+ {
+ var f uint64
+ f = in.Uint64()
+ x.ChildId = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type Signature struct {
+ Key []byte `json:"key"`
+ Sign []byte `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*Signature)(nil)
+ _ encoding.ProtoUnmarshaler = (*Signature)(nil)
+ _ json.Marshaler = (*Signature)(nil)
+ _ json.Unmarshaler = (*Signature)(nil)
+)
+
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
@@ -84,23 +388,138 @@ func (x *Signature) StableSize() (size int) {
return size
}
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *Signature) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Key)
- offset += proto.BytesMarshal(2, buf[offset:], x.Sign)
- return buf
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *Signature) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *Signature) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Key) != 0 {
+ mm.AppendBytes(1, x.Key)
+ }
+ if len(x.Sign) != 0 {
+ mm.AppendBytes(2, x.Sign)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *Signature) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "Signature")
+ }
+ switch fc.FieldNum {
+ case 1: // Key
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Key")
+ }
+ x.Key = data
+ case 2: // Sign
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Sign")
+ }
+ x.Sign = data
+ }
+ }
+ return nil
+}
+func (x *Signature) GetKey() []byte {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+func (x *Signature) SetKey(v []byte) {
+ x.Key = v
+}
+func (x *Signature) GetSign() []byte {
+ if x != nil {
+ return x.Sign
+ }
+ return nil
+}
+func (x *Signature) SetSign(v []byte) {
+ x.Sign = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *Signature) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"key\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Key)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ out.Base64Bytes(x.Sign)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *Signature) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "key":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Key = f
+ }
+ case "signature":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Sign = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
}
From 15dae8685e48bf9c271039bd109a21a1612e4a3b Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 19 Aug 2024 17:13:39 +0300
Subject: [PATCH 046/705] [#1323] metabase: Drop ownerID index
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/put.go | 9 --
pkg/local_object_storage/metabase/select.go | 8 +-
.../metabase/select_test.go | 106 ++++++++++++++++++
pkg/local_object_storage/metabase/util.go | 9 +-
4 files changed, 113 insertions(+), 19 deletions(-)
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index f351cb485..366cc1361 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -379,15 +379,6 @@ func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
objKey := objectKey(id, make([]byte, objectKeySize))
key := make([]byte, bucketKeySize)
- err := f(tx, namedBucketItem{
- name: ownerBucketName(cnr, key),
- key: []byte(obj.OwnerID().EncodeToString()),
- val: objKey,
- })
- if err != nil {
- return err
- }
-
var attrs []objectSDK.Attribute
if obj.ECHeader() != nil {
attrs = obj.ECHeader().ParentAttributes()
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 3a4d7a227..cc40b81ba 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -196,8 +196,7 @@ func (db *DB) selectFastFilter(
case v2object.FilterHeaderObjectID:
db.selectObjectID(tx, f, cnr, to, fNum, currEpoch)
case v2object.FilterHeaderOwnerID:
- bucketName := ownerBucketName(cnr, bucketName)
- db.selectFromFKBT(tx, bucketName, f, to, fNum)
+ return // moved to slow filters
case v2object.FilterHeaderPayloadHash:
bucketName := payloadHashBucketName(cnr, bucketName)
db.selectFromList(tx, bucketName, f, to, fNum)
@@ -510,6 +509,8 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
case v2object.FilterHeaderPayloadLength:
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.PayloadSize())
+ case v2object.FilterHeaderOwnerID:
+ data = []byte(obj.OwnerID().EncodeToString())
default:
continue // ignore unknown search attributes
}
@@ -544,7 +545,8 @@ func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
v2object.FilterHeaderVersion,
v2object.FilterHeaderCreationEpoch,
v2object.FilterHeaderPayloadLength,
- v2object.FilterHeaderHomomorphicHash:
+ v2object.FilterHeaderHomomorphicHash,
+ v2object.FilterHeaderOwnerID:
res.slowFilters = append(res.slowFilters, filters[i])
default: // fast filters or user attributes if unknown
res.fastFilters = append(res.fastFilters, filters[i])
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 8f9294d07..4fbc5910e 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -633,6 +633,112 @@ func TestDB_SelectObjectID(t *testing.T) {
})
}
+func TestDB_SelectOwnerID(t *testing.T) {
+ t.Parallel()
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close()) }()
+
+ cnr := cidtest.ID()
+
+ // prepare
+
+ parent := testutil.GenerateObjectWithCID(cnr)
+
+ regular := testutil.GenerateObjectWithCID(cnr)
+ idParent, _ := parent.ID()
+ regular.SetParentID(idParent)
+ regular.SetParent(parent)
+
+ err := putBig(db, regular)
+ require.NoError(t, err)
+
+ ts := testutil.GenerateObjectWithCID(cnr)
+ ts.SetType(objectSDK.TypeTombstone)
+ err = putBig(db, ts)
+ require.NoError(t, err)
+
+ lock := testutil.GenerateObjectWithCID(cnr)
+ lock.SetType(objectSDK.TypeLock)
+ err = putBig(db, lock)
+ require.NoError(t, err)
+
+ t.Run("not found objects", func(t *testing.T) {
+ raw := testutil.GenerateObjectWithCID(cnr)
+
+ fs := objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, raw.OwnerID())
+
+ testSelect(t, db, cnr, fs)
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, raw.OwnerID())
+
+ testSelect(t, db, cnr, fs,
+ object.AddressOf(regular),
+ object.AddressOf(parent),
+ object.AddressOf(ts),
+ object.AddressOf(lock),
+ )
+ })
+
+ t.Run("regular objects", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, regular.OwnerID())
+ testSelect(t, db, cnr, fs, object.AddressOf(regular))
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, regular.OwnerID())
+ testSelect(t, db, cnr, fs,
+ object.AddressOf(parent),
+ object.AddressOf(ts),
+ object.AddressOf(lock),
+ )
+ })
+
+ t.Run("tombstone objects", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, ts.OwnerID())
+ testSelect(t, db, cnr, fs, object.AddressOf(ts))
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, ts.OwnerID())
+ testSelect(t, db, cnr, fs,
+ object.AddressOf(regular),
+ object.AddressOf(parent),
+ object.AddressOf(lock),
+ )
+ })
+
+ t.Run("parent objects", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, parent.OwnerID())
+ testSelect(t, db, cnr, fs, object.AddressOf(parent))
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, parent.OwnerID())
+ testSelect(t, db, cnr, fs,
+ object.AddressOf(regular),
+ object.AddressOf(ts),
+ object.AddressOf(lock),
+ )
+ })
+
+ t.Run("lock objects", func(t *testing.T) {
+ fs := objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringEqual, lock.OwnerID())
+ testSelect(t, db, cnr, fs, object.AddressOf(lock))
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddObjectOwnerIDFilter(objectSDK.MatchStringNotEqual, lock.OwnerID())
+ testSelect(t, db, cnr, fs,
+ object.AddressOf(regular),
+ object.AddressOf(parent),
+ object.AddressOf(ts),
+ )
+ })
+}
+
type testTarget struct {
objects []*objectSDK.Object
}
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index 9249ae49b..f9f9898e9 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -89,10 +89,10 @@ const (
// FKBT index buckets.
// ====================
- // ownerPrefix is used for prefixing FKBT index buckets mapping owner to object IDs.
+ // ownerPrefix was used for prefixing FKBT index buckets mapping owner to object IDs.
// Key: owner ID
// Value: bucket containing object IDs as keys
- ownerPrefix
+ _
// userAttributePrefix is used for prefixing FKBT index buckets containing objects.
// Key: attribute value
// Value: bucket containing object IDs as keys
@@ -180,11 +180,6 @@ func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
}
-// ownerBucketName returns _ownerid.
-func ownerBucketName(cnr cid.ID, key []byte) []byte {
- return bucketName(cnr, ownerPrefix, key)
-}
-
// parentBucketName returns _parent.
func parentBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, parentPrefix, key)
From 2542d4f5dfda623a42ae144cc28ebfeb3d16ea89 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 19 Aug 2024 17:20:55 +0300
Subject: [PATCH 047/705] [#1323] metabase: Drop payload checksum index
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/put.go | 12 ------------
pkg/local_object_storage/metabase/select.go | 12 +++++++-----
pkg/local_object_storage/metabase/util.go | 9 ++-------
3 files changed, 9 insertions(+), 24 deletions(-)
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 366cc1361..3fa79f1e2 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -297,18 +297,6 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
objKey := objectKey(idObj, make([]byte, objectKeySize))
bucketName := make([]byte, bucketKeySize)
- cs, _ := obj.PayloadChecksum()
-
- // index payload hashes
- err := f(tx, namedBucketItem{
- name: payloadHashBucketName(cnr, bucketName),
- key: cs.Value(),
- val: objKey,
- })
- if err != nil {
- return err
- }
-
idParent, ok := obj.ParentID()
// index parent ids
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index cc40b81ba..720b7b5b9 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -195,11 +195,9 @@ func (db *DB) selectFastFilter(
switch f.Header() {
case v2object.FilterHeaderObjectID:
db.selectObjectID(tx, f, cnr, to, fNum, currEpoch)
- case v2object.FilterHeaderOwnerID:
+ case v2object.FilterHeaderOwnerID,
+ v2object.FilterHeaderPayloadHash:
return // moved to slow filters
- case v2object.FilterHeaderPayloadHash:
- bucketName := payloadHashBucketName(cnr, bucketName)
- db.selectFromList(tx, bucketName, f, to, fNum)
case v2object.FilterHeaderObjectType:
for _, bucketName := range bucketNamesForType(cnr, f.Operation(), f.Value()) {
selectAllFromBucket(tx, bucketName, to, fNum)
@@ -511,6 +509,9 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
binary.LittleEndian.PutUint64(data, obj.PayloadSize())
case v2object.FilterHeaderOwnerID:
data = []byte(obj.OwnerID().EncodeToString())
+ case v2object.FilterHeaderPayloadHash:
+ cs, _ := obj.PayloadChecksum()
+ data = cs.Value()
default:
continue // ignore unknown search attributes
}
@@ -546,7 +547,8 @@ func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
v2object.FilterHeaderCreationEpoch,
v2object.FilterHeaderPayloadLength,
v2object.FilterHeaderHomomorphicHash,
- v2object.FilterHeaderOwnerID:
+ v2object.FilterHeaderOwnerID,
+ v2object.FilterHeaderPayloadHash:
res.slowFilters = append(res.slowFilters, filters[i])
default: // fast filters or user attributes if unknown
res.fastFilters = append(res.fastFilters, filters[i])
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index f9f9898e9..ebf1713d0 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -102,10 +102,10 @@ const (
// List index buckets.
// ====================
- // payloadHashPrefix is used for prefixing List index buckets mapping payload hash to a list of object IDs.
+ // payloadHashPrefix was used for prefixing List index buckets mapping payload hash to a list of object IDs.
// Key: payload hash
// Value: list of object IDs
- payloadHashPrefix
+ _
// parentPrefix is used for prefixing List index buckets mapping parent ID to a list of children IDs.
// Key: parent ID
// Value: list of object IDs
@@ -170,11 +170,6 @@ func cidFromAttributeBucket(val []byte, attributeKey string) []byte {
return val[1:bucketKeySize]
}
-// payloadHashBucketName returns _payloadhash.
-func payloadHashBucketName(cnr cid.ID, key []byte) []byte {
- return bucketName(cnr, payloadHashPrefix, key)
-}
-
// rootBucketName returns _root.
func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
From 7bf20c9f1f06975a53184c0803dbc7bc5ca4eaf5 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 20 Aug 2024 11:59:42 +0300
Subject: [PATCH 048/705] [#1323] metabase: Add expiration epoch buckets
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/control.go | 1 +
pkg/local_object_storage/metabase/delete.go | 11 ++
.../metabase/delete_ec_test.go | 3 +-
pkg/local_object_storage/metabase/exists.go | 26 ++--
pkg/local_object_storage/metabase/expired.go | 118 +++++-------------
pkg/local_object_storage/metabase/get.go | 6 +-
.../metabase/iterators.go | 81 ++++--------
pkg/local_object_storage/metabase/put.go | 41 ++++++
pkg/local_object_storage/metabase/select.go | 7 +-
pkg/local_object_storage/metabase/util.go | 53 ++++++--
pkg/local_object_storage/shard/gc_test.go | 4 +-
11 files changed, 181 insertions(+), 170 deletions(-)
diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go
index 891a1e9b2..d6546d922 100644
--- a/pkg/local_object_storage/metabase/control.go
+++ b/pkg/local_object_storage/metabase/control.go
@@ -29,6 +29,7 @@ var (
string(garbageBucketName): {},
string(shardInfoBucket): {},
string(bucketNameLocked): {},
+ string(expEpochToObjectBucketName): {},
}
// deprecatedBuckets buckets that are not used anymore.
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index ae10564a8..683bd445f 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -478,6 +478,17 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error
key: objKey,
})
+ if expEpoch, ok := hasExpirationEpoch(obj); ok {
+ delUniqueIndexItem(tx, namedBucketItem{
+ name: expEpochToObjectBucketName,
+ key: expirationEpochKey(expEpoch, cnr, addr.Object()),
+ })
+ delUniqueIndexItem(tx, namedBucketItem{
+ name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)),
+ key: objKey,
+ })
+ }
+
return nil
}
diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go
index 0e627f095..66c79ecd7 100644
--- a/pkg/local_object_storage/metabase/delete_ec_test.go
+++ b/pkg/local_object_storage/metabase/delete_ec_test.go
@@ -418,7 +418,8 @@ func testVerifyNoObjectDataLeft(tx *bbolt.Tx) error {
return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
if bytes.Equal(name, shardInfoBucket) ||
bytes.Equal(name, containerCounterBucketName) ||
- bytes.Equal(name, containerVolumeBucketName) {
+ bytes.Equal(name, containerVolumeBucketName) ||
+ bytes.Equal(name, expEpochToObjectBucketName) {
return nil
}
return testBucketEmpty(name, b)
diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go
index 153d92110..2e1b1dce8 100644
--- a/pkg/local_object_storage/metabase/exists.go
+++ b/pkg/local_object_storage/metabase/exists.go
@@ -5,7 +5,6 @@ import (
"fmt"
"time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -96,7 +95,11 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpo
locked = objectLocked(tx, parent.Container(), parent.Object())
}
// check graveyard and object expiration first
- switch objectStatus(tx, addr, currEpoch) {
+ st, err := objectStatus(tx, addr, currEpoch)
+ if err != nil {
+ return false, false, err
+ }
+ switch st {
case 1:
return false, locked, logicerr.Wrap(new(apistatus.ObjectNotFound))
case 2:
@@ -138,30 +141,25 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpo
// - 1 if object with GC mark;
// - 2 if object is covered with tombstone;
// - 3 if object is expired.
-func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) uint8 {
+func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) {
// locked object could not be removed/marked with GC/expired
if objectLocked(tx, addr.Container(), addr.Object()) {
- return 0
+ return 0, nil
}
- // we check only if the object is expired in the current
- // epoch since it is considered the only corner case: the
- // GC is expected to collect all the objects that have
- // expired previously for less than the one epoch duration
-
- expired := isExpiredWithAttribute(tx, objectV2.SysAttributeExpEpoch, addr, currEpoch)
- if !expired {
- expired = isExpiredWithAttribute(tx, objectV2.SysAttributeExpEpochNeoFS, addr, currEpoch)
+ expired, err := isExpired(tx, addr, currEpoch)
+ if err != nil {
+ return 0, err
}
if expired {
- return 3
+ return 3, nil
}
graveyardBkt := tx.Bucket(graveyardBucketName)
garbageBkt := tx.Bucket(garbageBucketName)
addrKey := addressKey(addr, make([]byte, addressKeySize))
- return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt)
+ return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil
}
func inGraveyardWithKey(addrKey []byte, graveyard, garbageBCK *bbolt.Bucket) uint8 {
diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go
index aa2cb6f20..68144d8b1 100644
--- a/pkg/local_object_storage/metabase/expired.go
+++ b/pkg/local_object_storage/metabase/expired.go
@@ -2,12 +2,11 @@ package meta
import (
"context"
+ "encoding/binary"
"errors"
- "fmt"
"strconv"
"time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -17,6 +16,8 @@ import (
"go.opentelemetry.io/otel/trace"
)
+var errInvalidEpochValueLength = errors.New("could not parse expiration epoch: invalid data length")
+
// FilterExpired return expired items from addresses.
// Address considered expired if metabase does contain information about expiration and
// expiration epoch is less than epoch.
@@ -57,29 +58,11 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A
default:
}
- expiredNeoFS, err := selectExpiredObjectIDs(tx, objectV2.SysAttributeExpEpochNeoFS, epoch, containerID, objectIDs)
+ expired, err := selectExpiredObjects(tx, epoch, containerID, objectIDs)
if err != nil {
return err
}
-
- expiredSys, err := selectExpiredObjectIDs(tx, objectV2.SysAttributeExpEpoch, epoch, containerID, objectIDs)
- if err != nil {
- return err
- }
-
- for _, o := range expiredNeoFS {
- var a oid.Address
- a.SetContainer(containerID)
- a.SetObject(o)
- result = append(result, a)
- }
-
- for _, o := range expiredSys {
- var a oid.Address
- a.SetContainer(containerID)
- a.SetObject(o)
- result = append(result, a)
- }
+ result = append(result, expired...)
}
return nil
})
@@ -90,76 +73,39 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A
return result, nil
}
-func isExpiredWithAttribute(tx *bbolt.Tx, attr string, addr oid.Address, currEpoch uint64) bool {
- // bucket with objects that have expiration attr
- attrKey := make([]byte, bucketKeySize+len(attr))
- expirationBucket := tx.Bucket(attributeBucketName(addr.Container(), attr, attrKey))
- if expirationBucket != nil {
- // bucket that contains objects that expire in the current epoch
- prevEpochBkt := expirationBucket.Bucket([]byte(strconv.FormatUint(currEpoch-1, 10)))
- if prevEpochBkt != nil {
- rawOID := objectKey(addr.Object(), make([]byte, objectKeySize))
- if prevEpochBkt.Get(rawOID) != nil {
- return true
- }
- }
+func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) {
+ bucketName := make([]byte, bucketKeySize)
+ bucketName = objectToExpirationEpochBucketName(addr.Container(), bucketName)
+ b := tx.Bucket(bucketName)
+ if b == nil {
+ return false, nil
}
-
- return false
+ key := make([]byte, objectKeySize)
+ addr.Object().Encode(key)
+ val := b.Get(key)
+ if len(val) == 0 {
+ return false, nil
+ }
+ if len(val) != epochSize {
+ return false, errInvalidEpochValueLength
+ }
+ expEpoch := binary.LittleEndian.Uint64(val)
+ return expEpoch < currEpoch, nil
}
-func selectExpiredObjectIDs(tx *bbolt.Tx, attr string, epoch uint64, containerID cid.ID, objectIDs []oid.ID) ([]oid.ID, error) {
- result := make([]oid.ID, 0)
- notResolved := make(map[oid.ID]struct{})
- for _, oid := range objectIDs {
- notResolved[oid] = struct{}{}
- }
-
- expiredBuffer := make([]oid.ID, 0)
- objectKeyBuffer := make([]byte, objectKeySize)
-
- expirationBucketKey := make([]byte, bucketKeySize+len(attr))
- expirationBucket := tx.Bucket(attributeBucketName(containerID, attr, expirationBucketKey))
- if expirationBucket == nil {
- return result, nil // all not expired
- }
-
- err := expirationBucket.ForEach(func(epochExpBucketKey, _ []byte) error {
- bucketExpiresAfter, err := strconv.ParseUint(string(epochExpBucketKey), 10, 64)
+func selectExpiredObjects(tx *bbolt.Tx, epoch uint64, containerID cid.ID, objectIDs []oid.ID) ([]oid.Address, error) {
+ result := make([]oid.Address, 0)
+ var addr oid.Address
+ addr.SetContainer(containerID)
+ for _, objID := range objectIDs {
+ addr.SetObject(objID)
+ expired, err := isExpired(tx, addr, epoch)
if err != nil {
- return fmt.Errorf("could not parse expiration epoch: %w", err)
- } else if bucketExpiresAfter >= epoch {
- return nil
+ return nil, err
}
-
- epochExpirationBucket := expirationBucket.Bucket(epochExpBucketKey)
- if epochExpirationBucket == nil {
- return nil
+ if expired {
+ result = append(result, addr)
}
-
- expiredBuffer = expiredBuffer[:0]
- for oid := range notResolved {
- key := objectKey(oid, objectKeyBuffer)
- if epochExpirationBucket.Get(key) != nil {
- expiredBuffer = append(expiredBuffer, oid)
- }
- }
-
- for _, oid := range expiredBuffer {
- delete(notResolved, oid)
- result = append(result, oid)
- }
-
- if len(notResolved) == 0 {
- return errBreakBucketForEach
- }
-
- return nil
- })
-
- if err != nil && !errors.Is(err, errBreakBucketForEach) {
- return nil, err
}
-
return result, nil
}
diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go
index d979b4f0f..776f5d27c 100644
--- a/pkg/local_object_storage/metabase/get.go
+++ b/pkg/local_object_storage/metabase/get.go
@@ -90,7 +90,11 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
if checkStatus {
- switch objectStatus(tx, addr, currEpoch) {
+ st, err := objectStatus(tx, addr, currEpoch)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
case 1:
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
case 2:
diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go
index 7b60b7d50..d44c51fb2 100644
--- a/pkg/local_object_storage/metabase/iterators.go
+++ b/pkg/local_object_storage/metabase/iterators.go
@@ -7,7 +7,6 @@ import (
"strconv"
"time"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -79,63 +78,37 @@ func (db *DB) IterateExpired(ctx context.Context, epoch uint64, h ExpiredObjectH
}
func (db *DB) iterateExpired(tx *bbolt.Tx, epoch uint64, h ExpiredObjectHandler) error {
- err := tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
- cidBytes := cidFromAttributeBucket(name, objectV2.SysAttributeExpEpoch)
- if cidBytes == nil {
- cidBytes = cidFromAttributeBucket(name, objectV2.SysAttributeExpEpochNeoFS)
- if cidBytes == nil {
- return nil
- }
- }
-
- var cnrID cid.ID
- err := cnrID.Decode(cidBytes)
+ b := tx.Bucket(expEpochToObjectBucketName)
+ c := b.Cursor()
+ for k, _ := c.First(); k != nil; k, _ = c.Next() {
+ expiresAfter, cnr, obj, err := parseExpirationEpochKey(k)
if err != nil {
- return fmt.Errorf("could not parse container ID of expired bucket: %w", err)
+ return err
}
-
- return b.ForEachBucket(func(expKey []byte) error {
- bktExpired := b.Bucket(expKey)
- expiresAfter, err := strconv.ParseUint(string(expKey), 10, 64)
- if err != nil {
- return fmt.Errorf("could not parse expiration epoch: %w", err)
- } else if expiresAfter >= epoch {
- return nil
- }
-
- return bktExpired.ForEach(func(idKey, _ []byte) error {
- var id oid.ID
-
- err = id.Decode(idKey)
- if err != nil {
- return fmt.Errorf("could not parse ID of expired object: %w", err)
- }
-
- // Ignore locked objects.
- //
- // To slightly optimize performance we can check only REGULAR objects
- // (only they can be locked), but it's more reliable.
- if objectLocked(tx, cnrID, id) {
- return nil
- }
-
- var addr oid.Address
- addr.SetContainer(cnrID)
- addr.SetObject(id)
-
- return h(&ExpiredObject{
- typ: firstIrregularObjectType(tx, cnrID, idKey),
- addr: addr,
- })
- })
+ // bucket keys ordered by epoch, no need to continue lookup
+ if expiresAfter >= epoch {
+ return nil
+ }
+ if objectLocked(tx, cnr, obj) {
+ continue
+ }
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(obj)
+ objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
+ err = h(&ExpiredObject{
+ typ: firstIrregularObjectType(tx, cnr, objKey),
+ addr: addr,
})
- })
-
- if errors.Is(err, ErrInterruptIterator) {
- err = nil
+ if err == nil {
+ continue
+ }
+ if errors.Is(err, ErrInterruptIterator) {
+ return nil
+ }
+ return err
}
-
- return err
+ return nil
}
// IterateCoveredByTombstones iterates over all objects in DB which are covered
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 3fa79f1e2..d1706a7ab 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -6,8 +6,10 @@ import (
"errors"
"fmt"
gio "io"
+ "strconv"
"time"
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
@@ -242,6 +244,27 @@ func putUniqueIndexes(
}
}
+ if expEpoch, ok := hasExpirationEpoch(obj); ok {
+ err := putUniqueIndexItem(tx, namedBucketItem{
+ name: expEpochToObjectBucketName,
+ key: expirationEpochKey(expEpoch, cnr, addr.Object()),
+ val: zeroValue,
+ })
+ if err != nil {
+ return err
+ }
+ val := make([]byte, epochSize)
+ binary.LittleEndian.PutUint64(val, expEpoch)
+ err = putUniqueIndexItem(tx, namedBucketItem{
+ name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)),
+ key: objKey,
+ val: val,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
// index root object
if obj.Type() == objectSDK.TypeRegular && !obj.HasParent() {
if ecHead := obj.ECHeader(); ecHead != nil {
@@ -361,6 +384,24 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
return nil
}
+func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) {
+ attributes := obj.Attributes()
+ if ech := obj.ECHeader(); ech != nil {
+ attributes = ech.ParentAttributes()
+ }
+ for _, attr := range attributes {
+ if attr.Key() == objectV2.SysAttributeExpEpochNeoFS {
+ expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
+ return expEpoch, err == nil
+ }
+ if attr.Key() == objectV2.SysAttributeExpEpoch {
+ expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
+ return expEpoch, err == nil
+ }
+ }
+ return 0, false
+}
+
func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error {
id, _ := obj.ID()
cnr, _ := obj.ContainerID()
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 720b7b5b9..42737a41a 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -142,8 +142,11 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
var addr oid.Address
addr.SetContainer(cnr)
addr.SetObject(id)
-
- if objectStatus(tx, addr, currEpoch) > 0 {
+ st, err := objectStatus(tx, addr, currEpoch)
+ if err != nil {
+ return nil, err
+ }
+ if st > 0 {
continue // ignore removed objects
}
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index ebf1713d0..012c0dcc8 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -1,9 +1,10 @@
package meta
import (
- "bytes"
"crypto/sha256"
+ "encoding/binary"
"errors"
+ "fmt"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -23,6 +24,7 @@ var (
toMoveItBucketName = []byte{toMoveItPrefix}
containerVolumeBucketName = []byte{containerVolumePrefix}
containerCounterBucketName = []byte{containerCountersPrefix}
+ expEpochToObjectBucketName = []byte{expirationEpochToObjectPrefix}
zeroValue = []byte{0xFF}
@@ -124,6 +126,16 @@ const (
// Key: container ID + type
// Value: Object id
ecInfoPrefix
+
+ // expirationEpochToObjectPrefix is used for storing relation between expiration epoch and object id.
+ // Key: expiration epoch + object address
+ // Value: zero
+ expirationEpochToObjectPrefix
+
+ // objectToExpirationEpochPrefix is used for storing relation between object id and expiration epoch.
+ // Key: object address
+ // Value: expiration epoch
+ objectToExpirationEpochPrefix
)
const (
@@ -131,6 +143,7 @@ const (
bucketKeySize = 1 + cidSize
objectKeySize = sha256.Size
addressKeySize = cidSize + objectKeySize
+ epochSize = 8
)
func bucketName(cnr cid.ID, prefix byte, key []byte) []byte {
@@ -161,15 +174,6 @@ func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte {
return append(key[:bucketKeySize], attributeKey...)
}
-// returns from attributeBucketName result, nil otherwise.
-func cidFromAttributeBucket(val []byte, attributeKey string) []byte {
- if len(val) < bucketKeySize || val[0] != userAttributePrefix || !bytes.Equal(val[bucketKeySize:], []byte(attributeKey)) {
- return nil
- }
-
- return val[1:bucketKeySize]
-}
-
// rootBucketName returns _root.
func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
@@ -190,6 +194,35 @@ func ecInfoBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, ecInfoPrefix, key)
}
+// objectToExpirationEpochBucketName returns objectToExpirationEpochPrefix_.
+func objectToExpirationEpochBucketName(cnr cid.ID, key []byte) []byte {
+ return bucketName(cnr, objectToExpirationEpochPrefix, key)
+}
+
+func expirationEpochKey(epoch uint64, cnr cid.ID, obj oid.ID) []byte {
+ result := make([]byte, epochSize+addressKeySize)
+ binary.BigEndian.PutUint64(result, epoch)
+ cnr.Encode(result[epochSize:])
+ obj.Encode(result[epochSize+cidSize:])
+ return result
+}
+
+func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) {
+ if len(key) != epochSize+addressKeySize {
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("unexpected expiration epoch to object key length: %d", len(key))
+ }
+ epoch := binary.BigEndian.Uint64(key)
+ var cnr cid.ID
+ if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil {
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (container ID): %w", err)
+ }
+ var obj oid.ID
+ if err := obj.Decode(key[epochSize+cidSize:]); err != nil {
+ return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (object ID): %w", err)
+ }
+ return epoch, cnr, obj, nil
+}
+
// addressKey returns key for K-V tables when key is a whole address.
func addressKey(addr oid.Address, key []byte) []byte {
addr.Container().Encode(key)
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index a438b5def..90958cd35 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -74,7 +74,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
var getPrm GetPrm
getPrm.SetAddress(objectCore.AddressOf(obj))
_, err = sh.Get(context.Background(), getPrm)
- require.True(t, client.IsErrObjectNotFound(err), "expired object must be deleted")
+ require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired object must be deleted")
}
func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
@@ -168,7 +168,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
_, err = sh.Get(context.Background(), getPrm)
- require.True(t, client.IsErrObjectNotFound(err), "expired complex object must be deleted on epoch after lock expires")
+ require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires")
}
func TestGCDropsObjectInhumedFromWritecache(t *testing.T) {
From 0f08a2efba6a39ec8a1c8f733729d510e3ea36bd Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 20 Aug 2024 12:21:39 +0300
Subject: [PATCH 049/705] [#1323] metabase: Resolve funlen linter
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/put.go | 74 ++++++++++++------------
1 file changed, 38 insertions(+), 36 deletions(-)
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index d1706a7ab..015f87f92 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -197,46 +197,17 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
return nil
}
-func putUniqueIndexes(
- tx *bbolt.Tx,
- obj *objectSDK.Object,
- si *objectSDK.SplitInfo,
- id []byte,
-) error {
+func putUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, si *objectSDK.SplitInfo, id []byte) error {
isParent := si != nil
addr := objectCore.AddressOf(obj)
- cnr := addr.Container()
objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
bucketName := make([]byte, bucketKeySize)
- // add value to primary unique bucket
if !isParent {
- switch obj.Type() {
- case objectSDK.TypeRegular:
- bucketName = primaryBucketName(cnr, bucketName)
- case objectSDK.TypeTombstone:
- bucketName = tombstoneBucketName(cnr, bucketName)
- case objectSDK.TypeLock:
- bucketName = bucketNameLockers(cnr, bucketName)
- default:
- return ErrUnknownObjectType
- }
-
- rawObject, err := obj.CutPayload().Marshal()
- if err != nil {
- return fmt.Errorf("can't marshal object header: %w", err)
- }
-
- err = putUniqueIndexItem(tx, namedBucketItem{
- name: bucketName,
- key: objKey,
- val: rawObject,
- })
+ err := putRawObjectData(tx, obj, bucketName, addr, objKey)
if err != nil {
return err
}
-
- // index storageID if it is present
if id != nil {
if err = setStorageID(tx, objectCore.AddressOf(obj), id, false); err != nil {
return err
@@ -244,10 +215,40 @@ func putUniqueIndexes(
}
}
+ if err := putExpirationEpoch(tx, obj, addr, objKey); err != nil {
+ return err
+ }
+
+ return putSplitInfo(tx, obj, bucketName, addr, si, objKey)
+}
+
+func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, addr oid.Address, objKey []byte) error {
+ switch obj.Type() {
+ case objectSDK.TypeRegular:
+ bucketName = primaryBucketName(addr.Container(), bucketName)
+ case objectSDK.TypeTombstone:
+ bucketName = tombstoneBucketName(addr.Container(), bucketName)
+ case objectSDK.TypeLock:
+ bucketName = bucketNameLockers(addr.Container(), bucketName)
+ default:
+ return ErrUnknownObjectType
+ }
+ rawObject, err := obj.CutPayload().Marshal()
+ if err != nil {
+ return fmt.Errorf("can't marshal object header: %w", err)
+ }
+ return putUniqueIndexItem(tx, namedBucketItem{
+ name: bucketName,
+ key: objKey,
+ val: rawObject,
+ })
+}
+
+func putExpirationEpoch(tx *bbolt.Tx, obj *objectSDK.Object, addr oid.Address, objKey []byte) error {
if expEpoch, ok := hasExpirationEpoch(obj); ok {
err := putUniqueIndexItem(tx, namedBucketItem{
name: expEpochToObjectBucketName,
- key: expirationEpochKey(expEpoch, cnr, addr.Object()),
+ key: expirationEpochKey(expEpoch, addr.Container(), addr.Object()),
val: zeroValue,
})
if err != nil {
@@ -256,7 +257,7 @@ func putUniqueIndexes(
val := make([]byte, epochSize)
binary.LittleEndian.PutUint64(val, expEpoch)
err = putUniqueIndexItem(tx, namedBucketItem{
- name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)),
+ name: objectToExpirationEpochBucketName(addr.Container(), make([]byte, bucketKeySize)),
key: objKey,
val: val,
})
@@ -264,8 +265,10 @@ func putUniqueIndexes(
return err
}
}
+ return nil
+}
- // index root object
+func putSplitInfo(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, addr oid.Address, si *objectSDK.SplitInfo, objKey []byte) error {
if obj.Type() == objectSDK.TypeRegular && !obj.HasParent() {
if ecHead := obj.ECHeader(); ecHead != nil {
parentID := ecHead.Parent()
@@ -283,9 +286,8 @@ func putUniqueIndexes(
}
objKey = objectKey(parentID, objKey)
}
- return updateSplitInfoIndex(tx, objKey, cnr, bucketName, si)
+ return updateSplitInfoIndex(tx, objKey, addr.Container(), bucketName, si)
}
-
return nil
}
From 7d0d781db1b1d674b319a012d653f2bb510f82ca Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 20 Aug 2024 12:59:59 +0300
Subject: [PATCH 050/705] [#1323] metabase: Drop user attribute index
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/delete.go | 20 ---
pkg/local_object_storage/metabase/put.go | 49 -------
pkg/local_object_storage/metabase/select.go | 151 +++++---------------
pkg/local_object_storage/metabase/util.go | 11 +-
4 files changed, 38 insertions(+), 193 deletions(-)
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 683bd445f..00c8d06e0 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -341,11 +341,6 @@ func (db *DB) deleteObject(
return fmt.Errorf("can't remove list indexes: %w", err)
}
- err = updateFKBTIndexes(tx, obj, delFKBTIndexItem)
- if err != nil {
- return fmt.Errorf("can't remove fake bucket tree indexes: %w", err)
- }
-
if isParent {
// remove record from the garbage bucket, because regular object deletion does nothing for virtual object
garbageBKT := tx.Bucket(garbageBucketName)
@@ -386,21 +381,6 @@ func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) {
}
}
-func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- bkt := tx.Bucket(item.name)
- if bkt == nil {
- return nil
- }
-
- fkbtRoot := bkt.Bucket(item.key)
- if fkbtRoot == nil {
- return nil
- }
-
- _ = fkbtRoot.Delete(item.val) // ignore error, best effort there
- return nil
-}
-
func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt := tx.Bucket(item.name)
if bkt == nil {
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 015f87f92..ff79a0387 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -175,11 +175,6 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
return fmt.Errorf("can't put list indexes: %w", err)
}
- err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
- if err != nil {
- return fmt.Errorf("can't put fake bucket tree indexes: %w", err)
- }
-
// update container volume size estimation
if obj.Type() == objectSDK.TypeRegular && !isParent {
err = changeContainerSize(tx, cnr, obj.PayloadSize(), true)
@@ -404,36 +399,6 @@ func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) {
return 0, false
}
-func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error {
- id, _ := obj.ID()
- cnr, _ := obj.ContainerID()
- objKey := objectKey(id, make([]byte, objectKeySize))
-
- key := make([]byte, bucketKeySize)
- var attrs []objectSDK.Attribute
- if obj.ECHeader() != nil {
- attrs = obj.ECHeader().ParentAttributes()
- objKey = objectKey(obj.ECHeader().Parent(), make([]byte, objectKeySize))
- } else {
- attrs = obj.Attributes()
- }
-
- // user specified attributes
- for i := range attrs {
- key = attributeBucketName(cnr, attrs[i].Key(), key)
- err := f(tx, namedBucketItem{
- name: key,
- key: []byte(attrs[i].Value()),
- val: objKey,
- })
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
type bucketContainer interface {
Bucket([]byte) *bbolt.Bucket
CreateBucket([]byte) (*bbolt.Bucket, error)
@@ -464,20 +429,6 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
return updateUniqueIndexItem(tx, item, func(_, val []byte) ([]byte, error) { return val, nil })
}
-func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
- bkt, err := createBucketLikelyExists(tx, item.name)
- if err != nil {
- return fmt.Errorf("can't create index %v: %w", item.name, err)
- }
-
- fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
- if err != nil {
- return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err)
- }
-
- return fkbtRoot.Put(item.val, zeroValue)
-}
-
func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 42737a41a..1863fc25e 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -198,9 +198,6 @@ func (db *DB) selectFastFilter(
switch f.Header() {
case v2object.FilterHeaderObjectID:
db.selectObjectID(tx, f, cnr, to, fNum, currEpoch)
- case v2object.FilterHeaderOwnerID,
- v2object.FilterHeaderPayloadHash:
- return // moved to slow filters
case v2object.FilterHeaderObjectType:
for _, bucketName := range bucketNamesForType(cnr, f.Operation(), f.Value()) {
selectAllFromBucket(tx, bucketName, to, fNum)
@@ -220,14 +217,7 @@ func (db *DB) selectFastFilter(
selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, fNum)
- default: // user attribute
- bucketName := attributeBucketName(cnr, f.Header(), bucketName)
-
- if f.Operation() == objectSDK.MatchNotPresent {
- selectOutsideFKBT(tx, allBucketNames(cnr), bucketName, to, fNum)
- } else {
- db.selectFromFKBT(tx, bucketName, f, to, fNum)
- }
+ default:
}
}
@@ -237,16 +227,6 @@ var mBucketNaming = map[string][]func(cid.ID, []byte) []byte{
v2object.TypeLock.String(): {bucketNameLockers},
}
-func allBucketNames(cnr cid.ID) (names [][]byte) {
- for _, fns := range mBucketNaming {
- for _, fn := range fns {
- names = append(names, fn(cnr, make([]byte, bucketKeySize)))
- }
- }
-
- return
-}
-
func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal string) (names [][]byte) {
appendNames := func(key string) {
fns, ok := mBucketNaming[key]
@@ -278,83 +258,6 @@ func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal str
return
}
-// selectFromList looks into index to find list of addresses to add in
-// resulting cache.
-func (db *DB) selectFromFKBT(
- tx *bbolt.Tx,
- name []byte, // fkbt root bucket name
- f objectSDK.SearchFilter, // filter for operation and value
- to map[string]int, // resulting cache
- fNum int, // index of filter
-) { //
- matchFunc, ok := db.matchers[f.Operation()]
- if !ok {
- db.log.Debug(logs.MetabaseMissingMatcher, zap.Uint32("operation", uint32(f.Operation())))
-
- return
- }
-
- fkbtRoot := tx.Bucket(name)
- if fkbtRoot == nil {
- return
- }
-
- err := matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
- fkbtLeaf := fkbtRoot.Bucket(k)
- if fkbtLeaf == nil {
- return nil
- }
-
- return fkbtLeaf.ForEach(func(k, _ []byte) error {
- markAddressInCache(to, fNum, string(k))
-
- return nil
- })
- })
- if err != nil {
- db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error()))
- }
-}
-
-// selectOutsideFKBT looks into all incl buckets to find list of addresses outside to add in
-// resulting cache.
-func selectOutsideFKBT(
- tx *bbolt.Tx,
- incl [][]byte, // buckets
- name []byte, // fkbt root bucket name
- to map[string]int, // resulting cache
- fNum int, // index of filter
-) {
- mExcl := make(map[string]struct{})
-
- bktExcl := tx.Bucket(name)
- if bktExcl != nil {
- _ = bktExcl.ForEachBucket(func(k []byte) error {
- exclBktLeaf := bktExcl.Bucket(k)
- return exclBktLeaf.ForEach(func(k, _ []byte) error {
- mExcl[string(k)] = struct{}{}
-
- return nil
- })
- })
- }
-
- for i := range incl {
- bktIncl := tx.Bucket(incl[i])
- if bktIncl == nil {
- continue
- }
-
- _ = bktIncl.ForEach(func(k, _ []byte) error {
- if _, ok := mExcl[string(k)]; !ok {
- markAddressInCache(to, fNum, string(k))
- }
-
- return nil
- })
- }
-}
-
// selectFromList looks into index to find list of addresses to add in
// resulting cache.
func (db *DB) selectFromList(
@@ -491,13 +394,7 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
}
for i := range f {
- matchFunc, ok := db.matchers[f[i].Operation()]
- if !ok {
- return false
- }
-
var data []byte
-
switch f[i].Header() {
case v2object.FilterHeaderVersion:
data = []byte(obj.Version().String())
@@ -515,8 +412,18 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
case v2object.FilterHeaderPayloadHash:
cs, _ := obj.PayloadChecksum()
data = cs.Value()
- default:
- continue // ignore unknown search attributes
+ default: // user attribute
+ v, ok := attributeValue(obj, f[i].Header())
+ if ok {
+ data = []byte(v)
+ } else {
+ return f[i].Operation() == objectSDK.MatchNotPresent
+ }
+ }
+
+ matchFunc, ok := db.matchers[f[i].Operation()]
+ if !ok {
+ return false
}
if !matchFunc.matchSlow(f[i].Header(), data, f[i].Value()) {
@@ -527,6 +434,19 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
return true
}
+func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
+ objectAttributes := obj.Attributes()
+ if ech := obj.ECHeader(); ech != nil {
+ objectAttributes = ech.ParentAttributes()
+ }
+ for _, attr := range objectAttributes {
+ if attr.Key() == attribute {
+ return attr.Value(), true
+ }
+ }
+ return "", false
+}
+
// groupFilters divides filters in two groups: fast and slow. Fast filters
// processed by indexes and slow filters processed after by unmarshaling
// object headers.
@@ -545,16 +465,17 @@ func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
}
res.withCnrFilter = true
- case // slow filters
- v2object.FilterHeaderVersion,
- v2object.FilterHeaderCreationEpoch,
- v2object.FilterHeaderPayloadLength,
- v2object.FilterHeaderHomomorphicHash,
- v2object.FilterHeaderOwnerID,
- v2object.FilterHeaderPayloadHash:
- res.slowFilters = append(res.slowFilters, filters[i])
- default: // fast filters or user attributes if unknown
+ case // fast filters
+ v2object.FilterHeaderObjectID,
+ v2object.FilterHeaderObjectType,
+ v2object.FilterHeaderParent,
+ v2object.FilterHeaderSplitID,
+ v2object.FilterHeaderECParent,
+ v2object.FilterPropertyRoot,
+ v2object.FilterPropertyPhy:
res.fastFilters = append(res.fastFilters, filters[i])
+ default:
+ res.slowFilters = append(res.slowFilters, filters[i])
}
}
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index 012c0dcc8..9134616fe 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -95,10 +95,10 @@ const (
// Key: owner ID
// Value: bucket containing object IDs as keys
_
- // userAttributePrefix is used for prefixing FKBT index buckets containing objects.
+ // userAttributePrefix was used for prefixing FKBT index buckets containing objects.
// Key: attribute value
// Value: bucket containing object IDs as keys
- userAttributePrefix
+ _
// ====================
// List index buckets.
@@ -167,13 +167,6 @@ func smallBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, smallPrefix, key)
}
-// attributeBucketName returns _attr_.
-func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte {
- key[0] = userAttributePrefix
- cnr.Encode(key[1:])
- return append(key[:bucketKeySize], attributeKey...)
-}
-
// rootBucketName returns _root.
func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
From 76f67ea34ea316a4b4f05bedb7a5aa2d9599912e Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 20 Aug 2024 15:39:44 +0300
Subject: [PATCH 051/705] [#1323] metabase: Bump version
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/VERSION.md | 7 +++++++
pkg/local_object_storage/metabase/version.go | 2 +-
2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/pkg/local_object_storage/metabase/VERSION.md b/pkg/local_object_storage/metabase/VERSION.md
index 97e514db1..9cfc95332 100644
--- a/pkg/local_object_storage/metabase/VERSION.md
+++ b/pkg/local_object_storage/metabase/VERSION.md
@@ -2,6 +2,8 @@
This file describes changes between the metabase versions.
+Warning: database schema below is outdated and incomplete, see source code.
+
## Current
### Primary buckets
@@ -86,6 +88,11 @@ This file describes changes between the metabase versions.
# History
+## Version 3
+
+- Payload hash, owner ID and FKBT buckets deleted
+- Expiration epoch to object ID and object ID to expiration epoch added
+
## Version 2
- Container ID is encoded as 32-byte slice
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index 5748b64ee..bb2b66d9b 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -9,7 +9,7 @@ import (
)
// version contains current metabase version.
-const version = 2
+const version = 3
var versionKey = []byte("version")
From dfe825b81b5eead451ae20b76a50b382618ae7ba Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Wed, 21 Aug 2024 17:08:44 +0300
Subject: [PATCH 052/705] [#1309] test: Clean up config after tests
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-cli/internal/key/key_test.go | 2 ++
cmd/frostfs-node/config/test/config.go | 9 +++++++++
2 files changed, 11 insertions(+)
diff --git a/cmd/frostfs-cli/internal/key/key_test.go b/cmd/frostfs-cli/internal/key/key_test.go
index e3127a3fe..37e4fd4ee 100644
--- a/cmd/frostfs-cli/internal/key/key_test.go
+++ b/cmd/frostfs-cli/internal/key/key_test.go
@@ -24,6 +24,8 @@ var testCmd = &cobra.Command{
}
func Test_getOrGenerate(t *testing.T) {
+ t.Cleanup(viper.Reset)
+
dir := t.TempDir()
wallPath := filepath.Join(dir, "wallet.json")
diff --git a/cmd/frostfs-node/config/test/config.go b/cmd/frostfs-node/config/test/config.go
index 28ec65291..a93d7e648 100644
--- a/cmd/frostfs-node/config/test/config.go
+++ b/cmd/frostfs-node/config/test/config.go
@@ -40,6 +40,15 @@ func ForEachFileType(pref string, f func(*config.Config)) {
// ForEnvFileType creates config from `.env` file.
func ForEnvFileType(t testing.TB, pref string, f func(*config.Config)) {
+ envs := os.Environ()
+ t.Cleanup(func() {
+ os.Clearenv()
+ for _, env := range envs {
+ keyValue := strings.Split(env, "=")
+ os.Setenv(keyValue[0], keyValue[1])
+ }
+ })
+
f(fromEnvFile(t, pref+".env"))
}
From bd24beecf8652ca3662b4339b2799bf0385507a8 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 23 Aug 2024 12:28:09 +0300
Subject: [PATCH 053/705] [#1329] putSvc: Reset SuccessAfter for non-EC objects
in EC container broadcasting
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/distributed.go | 9 +++++++++
pkg/services/object/put/single.go | 9 ++++++---
pkg/services/object/put/streamer.go | 7 +++++--
pkg/services/object_manager/placement/traverser.go | 7 +++++++
4 files changed, 27 insertions(+), 5 deletions(-)
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
index c71427b67..5176f7a54 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/put/distributed.go
@@ -24,6 +24,8 @@ type distributedTarget struct {
nodeTargetInitializer func(nodeDesc) preparedObjectTarget
relay func(context.Context, nodeDesc) error
+
+ resetSuccessAfterOnBroadcast bool
}
// parameters and state of container traversal.
@@ -35,6 +37,8 @@ type traversal struct {
// container nodes which was processed during the primary object placement
mExclude map[string]*bool
+
+ resetSuccessAfterOnBroadcast bool
}
// updates traversal parameters after the primary placement finish and
@@ -44,6 +48,10 @@ func (x *traversal) submitPrimaryPlacementFinish() bool {
// do not track success during container broadcast (best-effort)
x.opts = append(x.opts, placement.WithoutSuccessTracking())
+ if x.resetSuccessAfterOnBroadcast {
+ x.opts = append(x.opts, placement.ResetSuccessAfter())
+ }
+
// avoid 2nd broadcast
x.extraBroadcastEnabled = false
@@ -118,5 +126,6 @@ func (t *distributedTarget) iteratePlacement(ctx context.Context) error {
iter := t.cfg.newNodeIterator(append(t.placementOpts, placement.ForObject(id)))
iter.extraBroadcastEnabled = needAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */)
+ iter.resetSuccessAfterOnBroadcast = t.resetSuccessAfterOnBroadcast
return iter.forEachNode(ctx, t.sendObject)
}
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 9fa8ddb67..3cc8518f5 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -166,6 +166,7 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
iter := s.cfg.newNodeIterator(placement.placementOptions)
iter.extraBroadcastEnabled = needAdditionalBroadcast(obj, localOnly)
+ iter.resetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
signer := &putSingleRequestSigner{
req: req,
@@ -209,9 +210,10 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace
}
type putSinglePlacement struct {
- placementOptions []placement.Option
- isEC bool
- container containerSDK.Container
+ placementOptions []placement.Option
+ isEC bool
+ container containerSDK.Container
+ resetSuccessAfterOnBroadcast bool
}
func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) {
@@ -232,6 +234,7 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
}
if container.IsECContainer(cnrInfo.Value) && !object.IsECSupported(obj) && !localOnly {
result.placementOptions = append(result.placementOptions, placement.SuccessAfter(uint32(policy.ECParityCount(cnrInfo.Value.PlacementPolicy())+1)))
+ result.resetSuccessAfterOnBroadcast = true
}
result.placementOptions = append(result.placementOptions, placement.ForContainer(cnrInfo.Value))
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 969c8fa19..6b396ec96 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -246,16 +246,19 @@ func (p *Streamer) newDefaultObjectWriter(prm *PutInitPrm, forECPlacement bool)
}
}
+ var resetSuccessAfterOnBroadcast bool
traverseOpts := prm.traverseOpts
if forECPlacement && !prm.common.LocalOnly() {
// save non-regular and linking object to EC container.
// EC 2.1 -> REP 2, EC 2.2 -> REP 3 etc.
traverseOpts = append(traverseOpts, placement.SuccessAfter(uint32(policy.ECParityCount(prm.cnr.PlacementPolicy())+1)))
+ resetSuccessAfterOnBroadcast = true
}
return &distributedTarget{
- cfg: p.cfg,
- placementOpts: traverseOpts,
+ cfg: p.cfg,
+ placementOpts: traverseOpts,
+ resetSuccessAfterOnBroadcast: resetSuccessAfterOnBroadcast,
nodeTargetInitializer: func(node nodeDesc) preparedObjectTarget {
if node.local {
return localTarget{
diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index 306169571..9a5877c52 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -303,6 +303,13 @@ func SuccessAfter(v uint32) Option {
}
}
+// ResetSuccessAfter resets flat success number setting option.
+func ResetSuccessAfter() Option {
+ return func(c *cfg) {
+ c.flatSuccess = nil
+ }
+}
+
// WithoutSuccessTracking disables success tracking in traversal.
func WithoutSuccessTracking() Option {
return func(c *cfg) {
From a059a7dcf06ed41354f1defe5d1822b79d4c1deb Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 23 Aug 2024 13:36:52 +0300
Subject: [PATCH 054/705] [#1329] cli: Skip linking objects in complex object
processing
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/object/nodes.go | 74 ++++++++++++-------------
cmd/frostfs-cli/modules/object/util.go | 9 +--
2 files changed, 39 insertions(+), 44 deletions(-)
diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go
index 42ae7324e..4efe04d16 100644
--- a/cmd/frostfs-cli/modules/object/nodes.go
+++ b/cmd/frostfs-cli/modules/object/nodes.go
@@ -172,7 +172,7 @@ func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *
func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID {
splitInfo := errSplitInfo.SplitInfo()
- if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID, false); ok {
+ if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok {
return members
}
@@ -185,6 +185,7 @@ func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli
func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject {
result := make([]phyObject, 0, len(members))
+ var hasNonEC, hasEC bool
var resultGuard sync.Mutex
if len(members) == 0 {
@@ -193,31 +194,8 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member
prmHead.SetRawFlag(true) // to get an error instead of whole object
- first := members[0]
- var addrObj oid.Address
- addrObj.SetContainer(cnrID)
- addrObj.SetObject(first)
- prmHead.SetAddress(addrObj)
-
- _, err := internalclient.HeadObject(cmd.Context(), prmHead)
- var ecInfoError *objectSDK.ECInfoError
- if errors.As(err, &ecInfoError) {
- chunks := getECObjectChunks(cmd, cnrID, first, ecInfoError)
- result = append(result, chunks...)
- } else if err == nil { // not EC object, so all members must be phy objects
- for _, member := range members {
- result = append(result, phyObject{
- containerID: cnrID,
- objectID: member,
- })
- }
- return result
- } else {
- commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", err)
- }
-
eg, egCtx := errgroup.WithContext(cmd.Context())
- for idx := 1; idx < len(members); idx++ {
+ for idx := 0; idx < len(members); idx++ {
partObjID := members[idx]
eg.Go(func() error {
@@ -227,24 +205,44 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member
partAddr.SetObject(partObjID)
partHeadPrm.SetAddress(partAddr)
- _, err := internalclient.HeadObject(egCtx, partHeadPrm)
- var ecInfoError *objectSDK.ECInfoError
- if errors.As(err, &ecInfoError) {
- chunks := getECObjectChunks(cmd, cnrID, partObjID, ecInfoError)
-
- resultGuard.Lock()
- defer resultGuard.Unlock()
- result = append(result, chunks...)
-
- return nil
- } else if err == nil {
- return errMalformedComplexObject
+ obj, err := internalclient.HeadObject(egCtx, partHeadPrm)
+ if err != nil {
+ var ecInfoError *objectSDK.ECInfoError
+ if errors.As(err, &ecInfoError) {
+ resultGuard.Lock()
+ defer resultGuard.Unlock()
+ result = append(result, getECObjectChunks(cmd, cnrID, partObjID, ecInfoError)...)
+ hasEC = true
+ return nil
+ }
+ return err
}
- return err
+
+ if obj.Header().Type() != objectSDK.TypeRegular {
+ commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", fmt.Errorf("object '%s' with type '%s' is not supported as part of complex object", partAddr, obj.Header().Type()))
+ }
+
+ if len(obj.Header().Children()) > 0 {
+ // linking object is not data object, so skip it
+ return nil
+ }
+
+ resultGuard.Lock()
+ defer resultGuard.Unlock()
+ result = append(result, phyObject{
+ containerID: cnrID,
+ objectID: partObjID,
+ })
+ hasNonEC = true
+
+ return nil
})
}
commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", eg.Wait())
+ if hasEC && hasNonEC {
+ commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", errMalformedComplexObject)
+ }
return result
}
diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go
index 96b80fe1b..b090c9f8c 100644
--- a/cmd/frostfs-cli/modules/object/util.go
+++ b/cmd/frostfs-cli/modules/object/util.go
@@ -374,7 +374,7 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID,
common.PrintVerbose(cmd, "Split information received - object is virtual.")
splitInfo := errSplit.SplitInfo()
- if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnr, true); ok {
+ if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnr); ok {
return members
}
@@ -390,7 +390,7 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID,
return nil
}
-func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID, withLinking bool) ([]oid.ID, bool) {
+func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) {
// collect split chain by the descending ease of operations (ease is evaluated heuristically).
// If any approach fails, we don't try the next since we assume that it will fail too.
@@ -411,10 +411,7 @@ func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.
common.PrintVerbose(cmd, "Received split members from the linking object: %v", children)
- if withLinking {
- return append(children, idLinking), true
- }
- return children, true
+ return append(children, idLinking), true
}
// linking object is not required for
From 80099d9a2ff1685ecc08b6f71bcba777ca51ca1d Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 23 Aug 2024 10:57:03 +0300
Subject: [PATCH 055/705] [#1328] pilorama: Add tricky test for
SortedByFilename
Signed-off-by: Evgenii Stratonikov
---
.../pilorama/forest_test.go | 62 +++++++++++++++++++
1 file changed, 62 insertions(+)
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index 001d095c8..c6c6e8c8b 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -1,11 +1,13 @@
package pilorama
import (
+ "bytes"
"context"
"crypto/rand"
"fmt"
mrand "math/rand"
"path/filepath"
+ "slices"
"strconv"
"strings"
"sync"
@@ -232,6 +234,66 @@ func BenchmarkForestSortedIteration(b *testing.B) {
}
}
+// The issue which we call "BugWithSkip" is easiest to understand when filenames are
+// monotonically increasing numbers. We want the list of sorted filenames to have different length interleaved.
+// The bug happens when we switch between length during listing.
+// Thus this test contains numbers from 1 to 1000 and batch size of size 100.
+func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) {
+ t.Skip()
+ for i := range providers {
+ t.Run(providers[i].name, func(t *testing.T) {
+ testForestTreeSortedIterationBugWithSkip(t, providers[i].construct(t))
+ })
+ }
+}
+
+func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close()) }()
+
+ cid := cidtest.ID()
+ d := CIDDescriptor{cid, 0, 1}
+ treeID := "version"
+ treeAdd := func(t *testing.T, ts int, filename string) {
+ _, err := s.TreeMove(context.Background(), d, treeID, &Move{
+ Child: RootID + uint64(ts),
+ Parent: RootID,
+ Meta: Meta{
+ Time: Timestamp(ts),
+ Items: []KeyValue{
+ {Key: AttributeFilename, Value: []byte(filename)},
+ },
+ },
+ })
+ require.NoError(t, err)
+ }
+
+ const count = 2000
+ treeAdd(t, 1, "")
+ for i := 1; i < count; i++ {
+ treeAdd(t, i+1, strconv.Itoa(i+1))
+ }
+
+ var result []MultiNodeInfo
+ treeAppend := func(t *testing.T, last *string, count int) *string {
+ res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
+ require.NoError(t, err)
+ result = append(result, res...)
+ return cursor
+ }
+
+ const batchSize = 10
+ last := treeAppend(t, nil, batchSize)
+ for i := 1; i < count/batchSize; i++ {
+ last = treeAppend(t, last, batchSize)
+ }
+ require.Len(t, result, count)
+ require.True(t, slices.IsSortedFunc(result, func(a, b MultiNodeInfo) int {
+ filenameA := findAttr(a.Meta, AttributeFilename)
+ filenameB := findAttr(b.Meta, AttributeFilename)
+ return bytes.Compare(filenameA, filenameB)
+ }))
+}
+
func TestForest_TreeSortedIteration(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
From 806ea37101fc35cedca3e10681e43d7a91968d67 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 23 Aug 2024 10:58:33 +0300
Subject: [PATCH 056/705] [#1328] pilorama: Do not skip items in
SortedByFilename
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Benchmark results:
```
goos: linux
goarch: amd64
pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama
cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz
│ old │ new │
│ sec/op │ sec/op vs base │
ForestSortedIteration/bbolt,root-8 207.2µ ± 6% 173.6µ ± 6% -16.23% (p=0.000 n=10)
ForestSortedIteration/bbolt,leaf-8 3.910µ ± 5% 3.928µ ± 7% ~ (p=0.529 n=10)
geomean 28.46µ 26.11µ -8.27%
```
They are not representative, as the worst case occurs when we have multiple
items of different lengths. However, `FileName` is usually shorter than 100
characters in practice, so the asymptotic behavior is the same.
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/pilorama/boltdb.go | 1 +
.../pilorama/forest_test.go | 3 +-
pkg/local_object_storage/pilorama/heap.go | 38 +++++++++++++++----
3 files changed, 32 insertions(+), 10 deletions(-)
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index 48363ceac..29941be83 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -1161,6 +1161,7 @@ func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *f
lastFilename = nil
nodes = nil
length = actualLength + 1
+ count = 0
c.Seek(append(prefix, byte(length), byte(length>>8)))
c.Prev() // c.Next() will be performed by for loop
}
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index c6c6e8c8b..ecca9842f 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -237,9 +237,8 @@ func BenchmarkForestSortedIteration(b *testing.B) {
// The issue which we call "BugWithSkip" is easiest to understand when filenames are
// monotonically increasing numbers. We want the list of sorted filenames to have different length interleaved.
// The bug happens when we switch between length during listing.
-// Thus this test contains numbers from 1 to 1000 and batch size of size 100.
+// Thus this test contains numbers from 1 to 2000 and batch size of size 10.
func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) {
- t.Skip()
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
testForestTreeSortedIterationBugWithSkip(t, providers[i].construct(t))
diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go
index ec57b9e1f..5a00bcf7a 100644
--- a/pkg/local_object_storage/pilorama/heap.go
+++ b/pkg/local_object_storage/pilorama/heap.go
@@ -2,6 +2,8 @@ package pilorama
import (
"container/heap"
+ "slices"
+ "strings"
)
type heapInfo struct {
@@ -28,9 +30,10 @@ func (h *filenameHeap) Pop() any {
// fixedHeap maintains a fixed number of smallest elements started at some point.
type fixedHeap struct {
- start *string
- count int
- h *filenameHeap
+ start *string
+ sorted bool
+ count int
+ h *filenameHeap
}
func newHeap(start *string, count int) *fixedHeap {
@@ -44,20 +47,39 @@ func newHeap(start *string, count int) *fixedHeap {
}
}
+const amortizationMultiplier = 5
+
func (h *fixedHeap) push(id MultiNode, filename string) bool {
if h.start != nil && filename <= *h.start {
return false
}
- heap.Push(h.h, heapInfo{id: id, filename: filename})
- if h.h.Len() > h.count {
- heap.Remove(h.h, h.h.Len()-1)
+
+ *h.h = append(*h.h, heapInfo{id: id, filename: filename})
+ h.sorted = false
+
+ if h.h.Len() > h.count*amortizationMultiplier {
+ slices.SortFunc(*h.h, func(a, b heapInfo) int {
+ return strings.Compare(a.filename, b.filename)
+ })
+ *h.h = (*h.h)[:h.count]
}
return true
}
func (h *fixedHeap) pop() (heapInfo, bool) {
- if h.h.Len() != 0 {
- return heap.Pop(h.h).(heapInfo), true
+ if !h.sorted {
+ slices.SortFunc(*h.h, func(a, b heapInfo) int {
+ return strings.Compare(a.filename, b.filename)
+ })
+ if len(*h.h) > h.count {
+ *h.h = (*h.h)[:h.count]
+ }
+ h.sorted = true
+ }
+ if len(*h.h) != 0 {
+ info := (*h.h)[0]
+ *h.h = (*h.h)[1:]
+ return info, true
}
return heapInfo{}, false
}
From fa7f9fbce240dde6e0a3c6ded13cb769bbe3b2bf Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Mon, 26 Aug 2024 15:36:39 +0300
Subject: [PATCH 057/705] [#1333] go.mod: Update api-go
Signed-off-by: Evgenii Stratonikov
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index b665709cc..b0df97511 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
diff --git a/go.sum b/go.sum
index 54e0d0301..f3a17363d 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326 h1:TkH+NSsY4C/Z8MocIJyMcqLm5vEhZcSowOldJyilKKA=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326/go.mod h1:zZnHiRv9m5+ESYLhBXY9Jds9A/YIDEUGiuyPUS09HwM=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88 h1:ckD87Z4pvPtu2hjpRcqPHlAtgOHPZfSW3x+zzwZztiY=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
From 5e9a97fd3eabc58347260b06fc6e1da4f5cf2185 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 27 Aug 2024 14:46:56 +0300
Subject: [PATCH 058/705] [#1336] go.mod: Update api-go and sdk-go
Signed-off-by: Evgenii Stratonikov
---
go.mod | 4 +-
go.sum | 8 +-
pkg/services/control/ir/service_frostfs.pb.go | 32 +--
pkg/services/control/service_frostfs.pb.go | 196 +++++++++---------
pkg/services/control/types_frostfs.pb.go | 6 +-
pkg/services/tree/service_frostfs.pb.go | 98 ++++-----
6 files changed, 172 insertions(+), 172 deletions(-)
diff --git a/go.mod b/go.mod
index b0df97511..b7f59c823 100644
--- a/go.mod
+++ b/go.mod
@@ -4,12 +4,12 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240820072028-6dd7be11d13b
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
diff --git a/go.sum b/go.sum
index f3a17363d..d2f926151 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88 h1:ckD87Z4pvPtu2hjpRcqPHlAtgOHPZfSW3x+zzwZztiY=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac h1:Gu3oiPnsSZPgwsUYs2f3xTQwndM/OWM/zo3zbN4rOb8=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240820072028-6dd7be11d13b h1:ZCJBVmQDcdv0twpX9xJU/AQwX+dXyvVfqr0Pq3x+3yk=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240820072028-6dd7be11d13b/go.mod h1:aaC2OR34tVrBwd0Z2gqoN5WLtV/idKqpqPDhb4XqmCo=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7 h1:9eZidZMT4tHOdc6GZRPlZR12IToKqHhUd5wzevdDUqo=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7/go.mod h1:VzVYcwo/eXjkdo5ktPdZeAE4fsnZX6zEun3g+5E2weo=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go
index 786095802..66d196617 100644
--- a/pkg/services/control/ir/service_frostfs.pb.go
+++ b/pkg/services/control/ir/service_frostfs.pb.go
@@ -164,10 +164,10 @@ func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -495,10 +495,10 @@ func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -810,10 +810,10 @@ func (x *TickEpochRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1125,10 +1125,10 @@ func (x *TickEpochResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1471,10 +1471,10 @@ func (x *RemoveNodeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1786,10 +1786,10 @@ func (x *RemoveNodeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2163,10 +2163,10 @@ func (x *RemoveContainerRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2478,10 +2478,10 @@ func (x *RemoveContainerResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index afd1c3c41..a446c5e59 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -164,10 +164,10 @@ func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -542,10 +542,10 @@ func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -904,10 +904,10 @@ func (x *SetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1185,10 +1185,10 @@ func (x *SetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1466,10 +1466,10 @@ func (x *GetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1828,10 +1828,10 @@ func (x *GetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2157,10 +2157,10 @@ func (x *DropObjectsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2438,10 +2438,10 @@ func (x *DropObjectsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2719,10 +2719,10 @@ func (x *ListShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2888,7 +2888,7 @@ func (x *ListShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
return
}
for i := range x.Shards {
- if x.Shards[i] != nil && x.Shards[i].StableSize() != 0 {
+ if x.Shards[i] != nil {
x.Shards[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -3057,10 +3057,10 @@ func (x *ListShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -3464,10 +3464,10 @@ func (x *SetShardModeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -3745,10 +3745,10 @@ func (x *SetShardModeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -4122,10 +4122,10 @@ func (x *SynchronizeTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -4403,10 +4403,10 @@ func (x *SynchronizeTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -4763,10 +4763,10 @@ func (x *EvacuateShardRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -5078,10 +5078,10 @@ func (x *EvacuateShardResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -5438,10 +5438,10 @@ func (x *FlushCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -5719,10 +5719,10 @@ func (x *FlushCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6065,10 +6065,10 @@ func (x *DoctorRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6346,10 +6346,10 @@ func (x *DoctorResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6772,10 +6772,10 @@ func (x *StartShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshale
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7053,10 +7053,10 @@ func (x *StartShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshal
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7334,10 +7334,10 @@ func (x *GetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMars
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7830,10 +7830,10 @@ func (x *GetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.Messa
if int32(x.Status) != 0 {
mm.AppendInt32(5, int32(x.Status))
}
- if x.Duration != nil && x.Duration.StableSize() != 0 {
+ if x.Duration != nil {
x.Duration.EmitProtobuf(mm.AppendMessage(6))
}
- if x.StartedAt != nil && x.StartedAt.StableSize() != 0 {
+ if x.StartedAt != nil {
x.StartedAt.EmitProtobuf(mm.AppendMessage(7))
}
if len(x.ErrorMessage) != 0 {
@@ -8321,10 +8321,10 @@ func (x *GetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMar
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -8602,10 +8602,10 @@ func (x *ResetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMa
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -8883,10 +8883,10 @@ func (x *ResetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageM
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -9164,10 +9164,10 @@ func (x *StopShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -9445,10 +9445,10 @@ func (x *StopShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshale
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -9613,7 +9613,7 @@ func (x *AddChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMa
if x == nil {
return
}
- if x.Target != nil && x.Target.StableSize() != 0 {
+ if x.Target != nil {
x.Target.EmitProtobuf(mm.AppendMessage(1))
}
if len(x.Chain) != 0 {
@@ -9795,10 +9795,10 @@ func (x *AddChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshal
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -10110,10 +10110,10 @@ func (x *AddChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarsha
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -10278,7 +10278,7 @@ func (x *GetChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMa
if x == nil {
return
}
- if x.Target != nil && x.Target.StableSize() != 0 {
+ if x.Target != nil {
x.Target.EmitProtobuf(mm.AppendMessage(1))
}
if len(x.ChainId) != 0 {
@@ -10460,10 +10460,10 @@ func (x *GetChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshal
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -10775,10 +10775,10 @@ func (x *GetChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarsha
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -10941,7 +10941,7 @@ func (x *ListChainLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.Message
if x == nil {
return
}
- if x.Target != nil && x.Target.StableSize() != 0 {
+ if x.Target != nil {
x.Target.EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -11094,10 +11094,10 @@ func (x *ListChainLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarsh
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -11423,10 +11423,10 @@ func (x *ListChainLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMars
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -11738,10 +11738,10 @@ func (x *ListTargetsLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMar
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -11907,7 +11907,7 @@ func (x *ListTargetsLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.Mess
return
}
for i := range x.Targets {
- if x.Targets[i] != nil && x.Targets[i].StableSize() != 0 {
+ if x.Targets[i] != nil {
x.Targets[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -12076,10 +12076,10 @@ func (x *ListTargetsLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMa
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -12244,7 +12244,7 @@ func (x *RemoveChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.Messag
if x == nil {
return
}
- if x.Target != nil && x.Target.StableSize() != 0 {
+ if x.Target != nil {
x.Target.EmitProtobuf(mm.AppendMessage(1))
}
if len(x.ChainId) != 0 {
@@ -12426,10 +12426,10 @@ func (x *RemoveChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMars
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -12707,10 +12707,10 @@ func (x *RemoveChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMar
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -12873,7 +12873,7 @@ func (x *RemoveChainLocalOverridesByTargetRequest_Body) EmitProtobuf(mm *easypro
if x == nil {
return
}
- if x.Target != nil && x.Target.StableSize() != 0 {
+ if x.Target != nil {
x.Target.EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -13026,10 +13026,10 @@ func (x *RemoveChainLocalOverridesByTargetRequest) EmitProtobuf(mm *easyproto.Me
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -13307,10 +13307,10 @@ func (x *RemoveChainLocalOverridesByTargetResponse) EmitProtobuf(mm *easyproto.M
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -13760,10 +13760,10 @@ func (x *SealWriteCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -14120,7 +14120,7 @@ func (x *SealWriteCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshale
return
}
for i := range x.Results {
- if x.Results[i] != nil && x.Results[i].StableSize() != 0 {
+ if x.Results[i] != nil {
x.Results[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -14289,10 +14289,10 @@ func (x *SealWriteCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -14618,10 +14618,10 @@ func (x *DetachShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -14899,10 +14899,10 @@ func (x *DetachShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go
index 9aff26a98..3cc37245f 100644
--- a/pkg/services/control/types_frostfs.pb.go
+++ b/pkg/services/control/types_frostfs.pb.go
@@ -549,7 +549,7 @@ func (x *NodeInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(2, x.Addresses[j])
}
for i := range x.Attributes {
- if x.Attributes[i] != nil && x.Attributes[i].StableSize() != 0 {
+ if x.Attributes[i] != nil {
x.Attributes[i].EmitProtobuf(mm.AppendMessage(3))
}
}
@@ -817,7 +817,7 @@ func (x *Netmap) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(1, x.Epoch)
}
for i := range x.Nodes {
- if x.Nodes[i] != nil && x.Nodes[i].StableSize() != 0 {
+ if x.Nodes[i] != nil {
x.Nodes[i].EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1013,7 +1013,7 @@ func (x *ShardInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(2, x.MetabasePath)
}
for i := range x.Blobstor {
- if x.Blobstor[i] != nil && x.Blobstor[i].StableSize() != 0 {
+ if x.Blobstor[i] != nil {
x.Blobstor[i].EmitProtobuf(mm.AppendMessage(3))
}
}
diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go
index c4d44253d..3c6ba21b7 100644
--- a/pkg/services/tree/service_frostfs.pb.go
+++ b/pkg/services/tree/service_frostfs.pb.go
@@ -70,7 +70,7 @@ func (x *AddRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(3, x.ParentId)
}
for i := range x.Meta {
- if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ if x.Meta[i] != nil {
x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
}
}
@@ -346,10 +346,10 @@ func (x *AddRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -661,10 +661,10 @@ func (x *AddResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -852,7 +852,7 @@ func (x *AddByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(4, x.Path[j])
}
for i := range x.Meta {
- if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ if x.Meta[i] != nil {
x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
}
}
@@ -1168,10 +1168,10 @@ func (x *AddByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1530,10 +1530,10 @@ func (x *AddByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1938,10 +1938,10 @@ func (x *RemoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2219,10 +2219,10 @@ func (x *RemoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2410,7 +2410,7 @@ func (x *MoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(4, x.NodeId)
}
for i := range x.Meta {
- if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ if x.Meta[i] != nil {
x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
}
}
@@ -2712,10 +2712,10 @@ func (x *MoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2993,10 +2993,10 @@ func (x *MoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -3553,10 +3553,10 @@ func (x *GetNodeByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -3734,7 +3734,7 @@ func (x *GetNodeByPathResponse_Info) EmitProtobuf(mm *easyproto.MessageMarshaler
mm.AppendUint64(2, x.Timestamp)
}
for i := range x.Meta {
- if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ if x.Meta[i] != nil {
x.Meta[i].EmitProtobuf(mm.AppendMessage(3))
}
}
@@ -3967,7 +3967,7 @@ func (x *GetNodeByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler
return
}
for i := range x.Nodes {
- if x.Nodes[i] != nil && x.Nodes[i].StableSize() != 0 {
+ if x.Nodes[i] != nil {
x.Nodes[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -4136,10 +4136,10 @@ func (x *GetNodeByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -4506,7 +4506,7 @@ func (x *GetSubTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if len(x.BearerToken) != 0 {
mm.AppendBytes(5, x.BearerToken)
}
- if x.OrderBy != nil && x.OrderBy.StableSize() != 0 {
+ if x.OrderBy != nil {
x.OrderBy.EmitProtobuf(mm.AppendMessage(6))
}
}
@@ -4803,10 +4803,10 @@ func (x *GetSubTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -4993,7 +4993,7 @@ func (x *GetSubTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(3, x.Timestamp[j])
}
for i := range x.Meta {
- if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ if x.Meta[i] != nil {
x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
}
}
@@ -5282,10 +5282,10 @@ func (x *GetSubTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -5597,10 +5597,10 @@ func (x *TreeListRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -5926,10 +5926,10 @@ func (x *TreeListResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6102,7 +6102,7 @@ func (x *ApplyRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if len(x.TreeId) != 0 {
mm.AppendString(2, x.TreeId)
}
- if x.Operation != nil && x.Operation.StableSize() != 0 {
+ if x.Operation != nil {
x.Operation.EmitProtobuf(mm.AppendMessage(3))
}
}
@@ -6307,10 +6307,10 @@ func (x *ApplyRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6588,10 +6588,10 @@ func (x *ApplyResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6996,10 +6996,10 @@ func (x *GetOpLogRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7162,7 +7162,7 @@ func (x *GetOpLogResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Operation != nil && x.Operation.StableSize() != 0 {
+ if x.Operation != nil {
x.Operation.EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -7315,10 +7315,10 @@ func (x *GetOpLogResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7596,10 +7596,10 @@ func (x *HealthcheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7877,10 +7877,10 @@ func (x *HealthcheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
From d6b42972a81f18d4351d81b154f75815f39546ea Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Tue, 27 Aug 2024 16:09:13 +0300
Subject: [PATCH 059/705] [#1338] object: Fix audit patch stream
Signed-off-by: Airat Arifullin
---
pkg/services/object/audit.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go
index b924386d1..39e1f9f2d 100644
--- a/pkg/services/object/audit.go
+++ b/pkg/services/object/audit.go
@@ -204,7 +204,7 @@ func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchRespo
if err != nil {
a.failed = true
}
- a.objectID = resp.GetBody().ObjectID
+ a.objectID = resp.GetBody().GetObjectID()
audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
From 6488ddee882ddb2facd8554aa274986076de0bbc Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Tue, 27 Aug 2024 16:09:59 +0300
Subject: [PATCH 060/705] [#1338] object: Fix range provider in `Patch` handler
Signed-off-by: Airat Arifullin
---
pkg/services/object/patch/range_provider.go | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/pkg/services/object/patch/range_provider.go b/pkg/services/object/patch/range_provider.go
index 755c5bf60..cb3f7c342 100644
--- a/pkg/services/object/patch/range_provider.go
+++ b/pkg/services/object/patch/range_provider.go
@@ -30,6 +30,12 @@ type rangeProvider struct {
var _ patcherSDK.RangeProvider = (*rangeProvider)(nil)
func (r *rangeProvider) GetRange(ctx context.Context, rng *objectSDK.Range) io.Reader {
+ // Remote GetRange request to a container node uses an SDK-client that fails range validation
+ // with zero-length. However, from the patcher's point of view, such request is still valid.
+ if rng.GetLength() == 0 {
+ return &nopReader{}
+ }
+
pipeReader, pipeWriter := io.Pipe()
var rngPrm getsvc.RangePrm
@@ -61,3 +67,9 @@ func (r *rangeProvider) GetRange(ctx context.Context, rng *objectSDK.Range) io.R
return pipeReader
}
+
+type nopReader struct{}
+
+func (nopReader) Read(_ []byte) (int, error) {
+ return 0, io.EOF
+}
From 7abbdca0641f1526c347da60012f437be57254e8 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 28 Aug 2024 13:56:45 +0300
Subject: [PATCH 061/705] [#1340] getSvc: Fix access denied error handling
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/get.go | 8 +++++++
pkg/services/object/get/remote.go | 7 +++++-
pkg/services/object/get/v2/get_forwarder.go | 26 ++++++++++++---------
3 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index 07a2f3a72..03b7f8bf2 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -2,9 +2,11 @@ package getsvc
import (
"context"
+ "errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
)
@@ -120,6 +122,12 @@ func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) {
exec.log.Debug(logs.OperationFinishedWithError,
zap.Error(exec.err),
)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ if execCnr && errors.As(exec.err, &errAccessDenied) {
+ // Local get can't return access denied error, so this error was returned by
+ // write to the output stream. So there is no need to try to find object on other nodes.
+ return
+ }
if execCnr {
exec.executeOnContainer(ctx)
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index ce9abfe1c..163767c43 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -31,6 +31,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
var errECInfo *objectSDK.ECInfoError
var errRemoved *apistatus.ObjectAlreadyRemoved
var errOutOfRange *apistatus.ObjectOutOfRange
+ var errAccessDenied *apistatus.ObjectAccessDenied
switch {
default:
@@ -38,7 +39,11 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
if r.status != statusEC {
// for raw requests, continue to collect other parts
r.status = statusUndefined
- r.err = new(apistatus.ObjectNotFound)
+ if errors.As(err, &errAccessDenied) {
+ r.err = err
+ } else {
+ r.err = new(apistatus.ObjectNotFound)
+ }
}
return false
case err == nil:
diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go
index 774f98643..18194c740 100644
--- a/pkg/services/object/get/v2/get_forwarder.go
+++ b/pkg/services/object/get/v2/get_forwarder.go
@@ -23,12 +23,14 @@ import (
)
type getRequestForwarder struct {
- OnceResign sync.Once
- OnceHeaderSending sync.Once
- GlobalProgress int
- Key *ecdsa.PrivateKey
- Request *objectV2.GetRequest
- Stream *streamObjectWriter
+ OnceResign sync.Once
+ GlobalProgress int
+ Key *ecdsa.PrivateKey
+ Request *objectV2.GetRequest
+ Stream *streamObjectWriter
+
+ headerSent bool
+ headerSentGuard sync.Mutex
}
func (f *getRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*objectSDK.Object, error) {
@@ -83,13 +85,15 @@ func (f *getRequestForwarder) writeHeader(ctx context.Context, v *objectV2.GetOb
obj.SetSignature(v.GetSignature())
obj.SetHeader(v.GetHeader())
- var err error
- f.OnceHeaderSending.Do(func() {
- err = f.Stream.WriteHeader(ctx, objectSDK.NewFromV2(obj))
- })
- if err != nil {
+ f.headerSentGuard.Lock()
+ defer f.headerSentGuard.Unlock()
+ if f.headerSent {
+ return nil
+ }
+ if err := f.Stream.WriteHeader(ctx, objectSDK.NewFromV2(obj)); err != nil {
return errCouldNotWriteObjHeader(err)
}
+ f.headerSent = true
return nil
}
From 01b6f1733cbc9d0ce133b5970a28817423686e2c Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 28 Aug 2024 14:29:07 +0300
Subject: [PATCH 062/705] [#1341] Makefile: Build linter with -trimpath
Fix error with go1.23:
```
Error: build linters: unable to load custom analyzer "truecloudlab-linters": ../linters/bin/external_linters.so, plugin.Open("/repo/frostfs/linters/bin/external_linters"): plugin was built with a different version of package cmp
Failed executing command with error: build linters: unable to load custom analyzer "truecloudlab-linters": ../linters/bin/external_linters.so, plugin.Open("/repo/frostfs/linters/bin/external_linters"): plugin was built with a different version of package cmp
```
Signed-off-by: Evgenii Stratonikov
---
Makefile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Makefile b/Makefile
index 94a8a14c3..3a6dea7c3 100755
--- a/Makefile
+++ b/Makefile
@@ -9,7 +9,7 @@ HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.1
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
PROTOC_VERSION ?= 25.0
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
PROTOC_OS_VERSION=osx-x86_64
@@ -197,7 +197,7 @@ lint-install:
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
- @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
+ @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:
From 7e97df4878b9bf95f88712394bb21201eb072012 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 28 Aug 2024 14:31:35 +0300
Subject: [PATCH 063/705] [#1341] Makefile: Update golangci-lint
Signed-off-by: Evgenii Stratonikov
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 3a6dea7c3..71492ef17 100755
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ HUB_IMAGE ?= truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.60.1
+LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
PROTOC_VERSION ?= 25.0
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
From 03976c6ed5df58b83f788a18416e27f9144342b1 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 28 Aug 2024 14:45:57 +0300
Subject: [PATCH 064/705] [#1341] .golangci.yml: Replace exportloopref with
copyloopvar
exportloopref is deprecated.
gopatch:
```
@@
var index, value identifier
var slice expression
@@
for index, value := range slice {
...
-value := value
...
}
@@
var index, value identifier
var slice expression
@@
for index, value := range slice {
...
-index := index
...
}
@@
var value identifier
var channel expression
@@
for value := range channel {
...
-value := value
...
}
```
Signed-off-by: Evgenii Stratonikov
---
.golangci.yml | 2 +-
cmd/frostfs-adm/internal/modules/morph/generate/generate.go | 2 --
cmd/frostfs-cli/modules/container/list.go | 1 -
cmd/frostfs-cli/modules/object/nodes.go | 3 ---
.../blobstor/blobovniczatree/rebuild.go | 3 ---
pkg/local_object_storage/blobstor/info.go | 1 -
pkg/local_object_storage/engine/control.go | 2 --
pkg/local_object_storage/engine/shards.go | 1 -
pkg/local_object_storage/engine/writecache.go | 1 -
pkg/local_object_storage/pilorama/forest_test.go | 1 -
pkg/local_object_storage/writecache/flush_test.go | 1 -
pkg/services/object/get/assemblerec.go | 1 -
pkg/services/object/put/common.go | 1 -
pkg/services/object/put/ec.go | 1 -
pkg/services/policer/ec.go | 2 --
pkg/services/tree/sync.go | 6 +-----
pkg/services/tree/sync_test.go | 2 --
17 files changed, 2 insertions(+), 29 deletions(-)
diff --git a/.golangci.yml b/.golangci.yml
index 2e9e78fc3..971f0d0e7 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -67,7 +67,7 @@ linters:
- bidichk
- durationcheck
- exhaustive
- - exportloopref
+ - copyloopvar
- gofmt
- goimports
- misspell
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
index c7de599e5..7af776797 100644
--- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
+++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
@@ -73,7 +73,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
return nil, fmt.Errorf("can't fetch password: %w", err)
}
- i := i
errG.Go(func() error {
p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
f, err := os.OpenFile(p, os.O_CREATE, 0o644)
@@ -107,7 +106,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
// Create consensus account with 2*N/3+1 multi-signature.
bftCount := smartcontract.GetDefaultHonestNodeCount(size)
for i := range wallets {
- i := i
ps := pubs.Copy()
errG.Go(func() error {
if err := addMultisigAccount(wallets[i], majCount, constants.CommitteeAccountName, passwords[i], ps); err != nil {
diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go
index a1410d7a0..6d0019ec4 100644
--- a/cmd/frostfs-cli/modules/container/list.go
+++ b/cmd/frostfs-cli/modules/container/list.go
@@ -70,7 +70,6 @@ var listContainersCmd = &cobra.Command{
continue
}
- cnrID := cnrID
prmGet.ClientParams.ContainerID = &cnrID
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go
index 4efe04d16..896f6f17f 100644
--- a/cmd/frostfs-cli/modules/object/nodes.go
+++ b/cmd/frostfs-cli/modules/object/nodes.go
@@ -393,8 +393,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
eg, egCtx := errgroup.WithContext(cmd.Context())
for _, cand := range candidates {
- cand := cand
-
eg.Go(func() error {
cli, err := createClient(egCtx, cmd, cand, pk)
if err != nil {
@@ -405,7 +403,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
}
for _, object := range objects {
- object := object
eg.Go(func() error {
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
resultMtx.Lock()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index 93ef8ba2e..cfc17cfae 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -160,9 +160,6 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
eg, egCtx := errgroup.WithContext(ctx)
for addr, data := range batch {
- addr := addr
- data := data
-
if err := limiter.AcquireWorkSlot(egCtx); err != nil {
_ = eg.Wait()
return result.Load(), err
diff --git a/pkg/local_object_storage/blobstor/info.go b/pkg/local_object_storage/blobstor/info.go
index 8a5bb870a..c1c47f3bb 100644
--- a/pkg/local_object_storage/blobstor/info.go
+++ b/pkg/local_object_storage/blobstor/info.go
@@ -43,7 +43,6 @@ func (b *BlobStor) ObjectsCount(ctx context.Context) (uint64, error) {
eg, egCtx := errgroup.WithContext(ctx)
for i := range b.storage {
- i := i
eg.Go(func() error {
v, e := b.storage[i].Storage.ObjectsCount(egCtx)
if e != nil {
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index 03196400a..4778cf539 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -82,8 +82,6 @@ func (e *StorageEngine) Init(ctx context.Context) error {
}
for id, sh := range e.shards {
- id := id
- sh := sh
eg.Go(func() error {
if err := sh.Init(ctx); err != nil {
errCh <- shardInitError{
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 56d4fcd4a..980b38a63 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -367,7 +367,6 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
var multiErrGuard sync.Mutex
var eg errgroup.Group
for _, sh := range deletedShards {
- sh := sh
eg.Go(func() error {
err := sh.SetMode(mode.Disabled)
if err != nil {
diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go
index 3e8f387ef..7710bc7f4 100644
--- a/pkg/local_object_storage/engine/writecache.go
+++ b/pkg/local_object_storage/engine/writecache.go
@@ -102,7 +102,6 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr
eg, egCtx := errgroup.WithContext(ctx)
for _, shardID := range prm.ShardIDs {
- shardID := shardID
eg.Go(func() error {
e.mtx.RLock()
sh, ok := e.shards[shardID.String()]
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index ecca9842f..41d7a567c 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -1459,7 +1459,6 @@ func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
func TestForest_ListTrees(t *testing.T) {
for i := range providers {
- i := i
t.Run(providers[i].name, func(t *testing.T) {
testTreeListTrees(t, providers[i].construct)
})
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 3c951bebe..a637da45d 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -170,7 +170,6 @@ func runFlushTest[Option any](
t.Run("ignore errors", func(t *testing.T) {
for _, f := range failures {
- f := f
t.Run(f.Desc, func(t *testing.T) {
errCountOpt, errCount := errCountOption()
wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt)
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index 6a02673c3..dde0d7dad 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -170,7 +170,6 @@ func (a *assemblerec) processECNodesRequests(ctx context.Context, nodes []placem
eg.SetLimit(dataCount)
for _, ch := range a.ecInfo.localChunks {
- ch := ch
eg.Go(func() error {
select {
case <-ctx.Done():
diff --git a/pkg/services/object/put/common.go b/pkg/services/object/put/common.go
index 6696a192b..cbb7f5f33 100644
--- a/pkg/services/object/put/common.go
+++ b/pkg/services/object/put/common.go
@@ -71,7 +71,6 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
var wg sync.WaitGroup
for _, addr := range addrs {
- addr := addr
if ok := n.mExclude[string(addr.PublicKey())]; ok != nil {
if *ok {
traverser.SubmitSuccess()
diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/put/ec.go
index fbb51912c..1fadf65fe 100644
--- a/pkg/services/object/put/ec.go
+++ b/pkg/services/object/put/ec.go
@@ -216,7 +216,6 @@ func (e *ecWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
}
for idx := range parts {
- idx := idx
eg.Go(func() error {
return e.writePart(egCtx, parts[idx], idx, nodes, visited)
})
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index 0a118797d..61a65fc21 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -357,8 +357,6 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
parts := make([]*objectSDK.Object, objInfo.ECInfo.Total)
errGroup, egCtx := errgroup.WithContext(ctx)
for idx, nodes := range existedChunks {
- idx := idx
- nodes := nodes
errGroup.Go(func() error {
var objID oid.Address
objID.SetContainer(parentAddress.Container())
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 0f85f50b1..be22074a5 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -190,8 +190,6 @@ func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID s
var prev *pilorama.Move
for m := range operationStream {
- m := m
-
// skip already applied op
if prev != nil && prev.Time == m.Time {
continue
@@ -287,8 +285,6 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
allNodesSynced.Store(true)
for i, n := range nodes {
- i := i
- n := n
errGroup.Go(func() error {
var nodeSynced bool
n.IterateNetworkEndpoints(func(addr string) bool {
@@ -421,7 +417,7 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
var wg sync.WaitGroup
for _, cnr := range cnrs {
wg.Add(1)
- cnr := cnr
+
err := s.syncPool.Submit(func() {
defer wg.Done()
s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go
index 190b4ccbb..497d90554 100644
--- a/pkg/services/tree/sync_test.go
+++ b/pkg/services/tree/sync_test.go
@@ -51,8 +51,6 @@ func Test_mergeOperationStreams(t *testing.T) {
// generate and put values to all chans
for i, ch := range nodeOpChans {
- i := i
- ch := ch
go func() {
for _, tm := range tt.opTimes[i] {
op := &pilorama.Move{}
From 6c2146bbc13e118d1518010d144ebc4dfadd5d6d Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 28 Aug 2024 18:32:30 +0300
Subject: [PATCH 065/705] [#1334] metabase: Add upgrade from v2 to v3
Signed-off-by: Dmitrii Stepanov
---
.../internal/modules/metabase/root.go | 15 +
.../internal/modules/metabase/upgrade.go | 86 +++++
cmd/frostfs-adm/internal/modules/root.go | 2 +
pkg/local_object_storage/metabase/upgrade.go | 364 ++++++++++++++++++
.../metabase/upgrade_test.go | 215 +++++++++++
pkg/local_object_storage/metabase/util.go | 9 +-
pkg/local_object_storage/metabase/version.go | 15 +
7 files changed, 703 insertions(+), 3 deletions(-)
create mode 100644 cmd/frostfs-adm/internal/modules/metabase/root.go
create mode 100644 cmd/frostfs-adm/internal/modules/metabase/upgrade.go
create mode 100644 pkg/local_object_storage/metabase/upgrade.go
create mode 100644 pkg/local_object_storage/metabase/upgrade_test.go
diff --git a/cmd/frostfs-adm/internal/modules/metabase/root.go b/cmd/frostfs-adm/internal/modules/metabase/root.go
new file mode 100644
index 000000000..5b21ed273
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/metabase/root.go
@@ -0,0 +1,15 @@
+package metabase
+
+import "github.com/spf13/cobra"
+
+// RootCmd is the root command of the metabase section.
+var RootCmd = &cobra.Command{
+ Use: "metabase",
+ Short: "Section for metabase commands",
+}
+
+func init() {
+ RootCmd.AddCommand(UpgradeCmd)
+
+ initUpgradeCommand()
+}
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
new file mode 100644
index 000000000..83e085df4
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
@@ -0,0 +1,86 @@
+package metabase
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
+ shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "github.com/spf13/cobra"
+)
+
+const (
+ pathFlag = "path"
+ noCompactFlag = "no-compact"
+)
+
+var errNoPathsFound = errors.New("no metabase paths found")
+
+var path string
+
+var UpgradeCmd = &cobra.Command{
+ Use: "upgrade",
+ Short: "Upgrade metabase to latest version",
+ RunE: upgrade,
+}
+
+func upgrade(cmd *cobra.Command, _ []string) error {
+ configFile, err := cmd.Flags().GetString(commonflags.ConfigFlag)
+ if err != nil {
+ return err
+ }
+ configDir, err := cmd.Flags().GetString(commonflags.ConfigDirFlag)
+ if err != nil {
+ return err
+ }
+ noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
+ var paths []string
+ if path != "" {
+ paths = append(paths, path)
+ }
+ appCfg := config.New(configFile, configDir, config.EnvPrefix)
+ if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
+ paths = append(paths, sc.Metabase().Path())
+ return nil
+ }); err != nil {
+ return fmt.Errorf("failed to get metabase paths: %w", err)
+ }
+ if len(paths) == 0 {
+ return errNoPathsFound
+ }
+ cmd.Println("found", len(paths), "metabases:")
+ for i, path := range paths {
+ cmd.Println(i+1, ":", path)
+ }
+ result := make(map[string]bool)
+ for _, path := range paths {
+ cmd.Println("upgrading metabase", path, "...")
+ if err := meta.Upgrade(cmd.Context(), path, !noCompact, func(a ...any) {
+ cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
+ }); err != nil {
+ result[path] = false
+ cmd.Println("error: failed to upgrade metabase", path, ":", err)
+ } else {
+ result[path] = true
+ cmd.Println("metabase", path, "upgraded successfully")
+ }
+ }
+ for mb, ok := range result {
+ if ok {
+ cmd.Println(mb, ": success")
+ } else {
+ cmd.Println(mb, ": failed")
+ }
+ }
+ return nil
+}
+
+func initUpgradeCommand() {
+ flags := UpgradeCmd.Flags()
+ flags.StringVar(&path, pathFlag, "", "Path to metabase file")
+ flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file")
+}
diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go
index 8595483ab..defd898c8 100644
--- a/cmd/frostfs-adm/internal/modules/root.go
+++ b/cmd/frostfs-adm/internal/modules/root.go
@@ -5,6 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
@@ -41,6 +42,7 @@ func init() {
rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd)
rootCmd.AddCommand(storagecfg.RootCmd)
+ rootCmd.AddCommand(metabase.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
new file mode 100644
index 000000000..014e50286
--- /dev/null
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -0,0 +1,364 @@
+package meta
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "os"
+ "strconv"
+ "sync/atomic"
+ "time"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ upgradeLogFrequency = 50_000
+ upgradeWorkersCount = 1_000
+ compactMaxTxSize = 256 << 20
+ upgradeTimeout = 1 * time.Second
+)
+
+var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, log func(a ...any)) error{
+ 2: upgradeFromV2ToV3,
+}
+
+func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any)) error {
+ if _, err := os.Stat(path); err != nil {
+ return fmt.Errorf("check metabase existence: %w", err)
+ }
+ opts := bbolt.DefaultOptions
+ opts.Timeout = upgradeTimeout
+ db, err := bbolt.Open(path, os.ModePerm, opts)
+ if err != nil {
+ return fmt.Errorf("open metabase: %w", err)
+ }
+ var version uint64
+ if err := db.View(func(tx *bbolt.Tx) error {
+ var e error
+ version, e = currentVersion(tx)
+ return e
+ }); err != nil {
+ return err
+ }
+ updater, found := updates[version]
+ if !found {
+ return fmt.Errorf("unsupported version %d: no update available", version)
+ }
+ if err := updater(ctx, db, log); err != nil {
+ return fmt.Errorf("update metabase schema: %w", err)
+ }
+ if compact {
+ log("compacting metabase...")
+ err := compactDB(db)
+ if err != nil {
+ return fmt.Errorf("compact metabase: %w", err)
+ }
+ log("metabase compacted")
+ }
+ return db.Close()
+}
+
+func compactDB(db *bbolt.DB) error {
+ sourcePath := db.Path()
+ tmpFileName := sourcePath + "." + time.Now().Format(time.RFC3339)
+ f, err := os.Stat(sourcePath)
+ if err != nil {
+ return err
+ }
+ dst, err := bbolt.Open(tmpFileName, f.Mode(), &bbolt.Options{
+ Timeout: 100 * time.Millisecond,
+ })
+ if err != nil {
+ return fmt.Errorf("can't open new metabase to compact: %w", err)
+ }
+ if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
+ return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
+ }
+ if err := dst.Close(); err != nil {
+ return fmt.Errorf("close compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("close source metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
+ if err := os.Rename(tmpFileName, sourcePath); err != nil {
+ return fmt.Errorf("replace source metabase with compacted: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
+ return nil
+}
+
+func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ if err := createExpirationEpochBuckets(ctx, db, log); err != nil {
+ return err
+ }
+ if err := dropUserAttributes(ctx, db, log); err != nil {
+ return err
+ }
+ if err := dropOwnerIDIndex(ctx, db, log); err != nil {
+ return err
+ }
+ if err := dropPayloadChecksumIndex(ctx, db, log); err != nil {
+ return err
+ }
+ return db.Update(func(tx *bbolt.Tx) error {
+ return updateVersion(tx, version)
+ })
+}
+
+type objectIDToExpEpoch struct {
+ containerID cid.ID
+ objectID oid.ID
+ expirationEpoch uint64
+}
+
+func createExpirationEpochBuckets(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ log("filling expiration epoch buckets...")
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ _, err := tx.CreateBucketIfNotExists(expEpochToObjectBucketName)
+ return err
+ }); err != nil {
+ return err
+ }
+ objects := make(chan objectIDToExpEpoch)
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.Go(func() error {
+ return selectObjectsWithExpirationEpoch(ctx, db, objects)
+ })
+ var count atomic.Uint64
+ for i := 0; i < upgradeWorkersCount; i++ {
+ eg.Go(func() error {
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case obj, ok := <-objects:
+ if !ok {
+ return nil
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ if err := putUniqueIndexItem(tx, namedBucketItem{
+ name: expEpochToObjectBucketName,
+ key: expirationEpochKey(obj.expirationEpoch, obj.containerID, obj.objectID),
+ val: zeroValue,
+ }); err != nil {
+ return err
+ }
+ val := make([]byte, epochSize)
+ binary.LittleEndian.PutUint64(val, obj.expirationEpoch)
+ return putUniqueIndexItem(tx, namedBucketItem{
+ name: objectToExpirationEpochBucketName(obj.containerID, make([]byte, bucketKeySize)),
+ key: objectKey(obj.objectID, make([]byte, objectKeySize)),
+ val: val,
+ })
+ }); err != nil {
+ return err
+ }
+ }
+ if c := count.Add(1); c%upgradeLogFrequency == 0 {
+ log("expiration epoch filled for", c, "objects...")
+ }
+ }
+ })
+ }
+ err := eg.Wait()
+ if err != nil {
+		log("filling expiration epoch buckets completed with an error:", err)
+ return err
+ }
+ log("filling expiration epoch buckets completed successfully, total", count.Load(), "objects")
+ return nil
+}
+
+func selectObjectsWithExpirationEpoch(ctx context.Context, db *bbolt.DB, objects chan objectIDToExpEpoch) error {
+ defer close(objects)
+
+ const batchSize = 1000
+ it := &objectsWithExpirationEpochBatchIterator{
+ lastAttributeKey: usrAttrPrefix,
+ }
+ for {
+ if err := getNextObjectsWithExpirationEpochBatch(ctx, db, it, batchSize); err != nil {
+ return err
+ }
+ for _, item := range it.items {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case objects <- item:
+ }
+ }
+
+ if len(it.items) < batchSize {
+ return nil
+ }
+ it.items = nil
+ }
+}
+
+var (
+ usrAttrPrefix = []byte{userAttributePrefix}
+ errBatchSizeLimit = errors.New("batch size limit")
+)
+
+type objectsWithExpirationEpochBatchIterator struct {
+ lastAttributeKey []byte
+ lastAttributeValue []byte
+ lastAttrKeyValueItem []byte
+ items []objectIDToExpEpoch
+}
+
+// - {prefix}{containerID}{attributeKey} <- bucket
+// -- {attributeValue} <- bucket, expirationEpoch
+// --- {objectID}: zeroValue <- record
+
+func getNextObjectsWithExpirationEpochBatch(ctx context.Context, db *bbolt.DB, it *objectsWithExpirationEpochBatchIterator, batchSize int) error {
+ seekAttrValue := it.lastAttributeValue
+ seekAttrKVItem := it.lastAttrKeyValueItem
+ err := db.View(func(tx *bbolt.Tx) error {
+ attrKeyC := tx.Cursor()
+ for attrKey, _ := attrKeyC.Seek(it.lastAttributeKey); attrKey != nil && bytes.HasPrefix(attrKey, usrAttrPrefix); attrKey, _ = attrKeyC.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if len(attrKey) <= 1+cidSize {
+ continue
+ }
+ attributeKey := string(attrKey[1+cidSize:])
+ if attributeKey != objectV2.SysAttributeExpEpochNeoFS && attributeKey != objectV2.SysAttributeExpEpoch {
+ continue
+ }
+ var containerID cid.ID
+ if err := containerID.Decode(attrKey[1 : 1+cidSize]); err != nil {
+ return fmt.Errorf("decode container id from user attribute bucket: %w", err)
+ }
+ if err := iterateExpirationAttributeKeyBucket(ctx, tx.Bucket(attrKey), it, batchSize, containerID, attrKey, seekAttrValue, seekAttrKVItem); err != nil {
+ return err
+ }
+ seekAttrValue = nil
+ seekAttrKVItem = nil
+ }
+ return nil
+ })
+ if err != nil && !errors.Is(err, errBatchSizeLimit) {
+ return err
+ }
+ return nil
+}
+
+func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, it *objectsWithExpirationEpochBatchIterator, batchSize int, containerID cid.ID, attrKey, seekAttrValue, seekAttrKVItem []byte) error {
+ attrValueC := b.Cursor()
+ for attrValue, v := attrValueC.Seek(seekAttrValue); attrValue != nil; attrValue, v = attrValueC.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if v != nil {
+ continue // need to iterate over buckets, not records
+ }
+ expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64)
+ if err != nil {
+ return fmt.Errorf("could not parse expiration epoch: %w", err)
+ }
+ expirationEpochBucket := b.Bucket(attrValue)
+ attrKeyValueC := expirationEpochBucket.Cursor()
+ for attrKeyValueItem, v := attrKeyValueC.Seek(seekAttrKVItem); attrKeyValueItem != nil; attrKeyValueItem, v = attrKeyValueC.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if v == nil {
+ continue // need to iterate over records, not buckets
+ }
+ if bytes.Equal(it.lastAttributeKey, attrKey) && bytes.Equal(it.lastAttributeValue, attrValue) && bytes.Equal(it.lastAttrKeyValueItem, attrKeyValueItem) {
+ continue
+ }
+ var objectID oid.ID
+ if err := objectID.Decode(attrKeyValueItem); err != nil {
+ return fmt.Errorf("decode object id from container '%s' expiration epoch %d: %w", containerID, expirationEpoch, err)
+ }
+ it.lastAttributeKey = bytes.Clone(attrKey)
+ it.lastAttributeValue = bytes.Clone(attrValue)
+ it.lastAttrKeyValueItem = bytes.Clone(attrKeyValueItem)
+ it.items = append(it.items, objectIDToExpEpoch{
+ containerID: containerID,
+ objectID: objectID,
+ expirationEpoch: expirationEpoch,
+ })
+ if len(it.items) == batchSize {
+ return errBatchSizeLimit
+ }
+ }
+ seekAttrKVItem = nil
+ }
+ return nil
+}
+
+func dropUserAttributes(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ return dropBucketsByPrefix(ctx, db, []byte{userAttributePrefix}, func(a ...any) {
+ log(append([]any{"user attributes:"}, a...)...)
+ })
+}
+
+func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ return dropBucketsByPrefix(ctx, db, []byte{ownerPrefix}, func(a ...any) {
+ log(append([]any{"owner ID index:"}, a...)...)
+ })
+}
+
+func dropPayloadChecksumIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ return dropBucketsByPrefix(ctx, db, []byte{payloadHashPrefix}, func(a ...any) {
+ log(append([]any{"payload checksum:"}, a...)...)
+ })
+}
+
+func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log func(a ...any)) error {
+ log("deleting buckets...")
+ const batch = 1000
+ var count uint64
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ var keys [][]byte
+ if err := db.View(func(tx *bbolt.Tx) error {
+ c := tx.Cursor()
+ for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() {
+ keys = append(keys, bytes.Clone(k))
+ }
+ return nil
+ }); err != nil {
+ log("deleting buckets completed with an error:", err)
+ return err
+ }
+ if len(keys) == 0 {
+ log("deleting buckets completed successfully, deleted", count, "buckets")
+ return nil
+ }
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ for _, k := range keys {
+ if err := tx.DeleteBucket(k); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ log("deleting buckets completed with an error:", err)
+ return err
+ }
+ if count += uint64(len(keys)); count%upgradeLogFrequency == 0 {
+ log("deleted", count, "buckets")
+ }
+ }
+}
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
new file mode 100644
index 000000000..dc3d7d07d
--- /dev/null
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -0,0 +1,215 @@
+//go:build integration
+
+package meta
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "testing"
+ "time"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+)
+
+const upgradeFilePath = "/path/to/metabase.v2"
+
+func TestUpgradeV2ToV3(t *testing.T) {
+ path := createTempCopy(t, upgradeFilePath)
+ defer func() {
+ require.NoError(t, os.Remove(path))
+ }()
+ db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t)))
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.ErrorIs(t, db.Init(), ErrOutdatedVersion)
+ require.NoError(t, db.Close())
+ require.NoError(t, Upgrade(context.Background(), path, true, t.Log))
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
+ fmt.Println()
+}
+
+func createTempCopy(t *testing.T, path string) string {
+ src, err := os.Open(path)
+ require.NoError(t, err)
+
+ tmpPath := upgradeFilePath + time.Now().Format(time.RFC3339)
+ dest, err := os.Create(tmpPath)
+ require.NoError(t, err)
+
+ _, err = io.Copy(dest, src)
+ require.NoError(t, err)
+
+ require.NoError(t, src.Close())
+ require.NoError(t, dest.Close())
+
+ return tmpPath
+}
+
+func TestGenerateMetabaseFile(t *testing.T) {
+ t.Skip("for generating db")
+ const (
+ containersCount = 10_000
+ simpleObjectsCount = 500_000
+ complexObjectsCount = 500_000 // x2
+ deletedByGCMarksCount = 100_000
+ deletedByTombstoneCount = 100_000 // x2
+ lockedCount = 100_000 // x2
+
+ allocSize = 128 << 20
+ generateWorkersCount = 1_000
+ minEpoch = 1_000
+ maxFilename = 1_000
+ maxStorageID = 10_000
+ )
+
+ db := New(WithPath(upgradeFilePath), WithEpochState(epochState{e: minEpoch}), WithLogger(test.NewLogger(t)))
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ db.boltDB.AllocSize = allocSize
+ db.boltDB.NoSync = true
+ require.NoError(t, db.Init())
+ containers := make([]cid.ID, containersCount)
+ for i := range containers {
+ containers[i] = cidtest.ID()
+ }
+ oc, err := db.ObjectCounters()
+ require.NoError(t, err)
+ require.True(t, oc.IsZero())
+ eg, ctx := errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // simple objects
+ for i := 0; i < simpleObjectsCount; i++ {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info("simple objects generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // complex objects
+ for i := 0; i < complexObjectsCount; i++ {
+ i := i
+ eg.Go(func() error {
+ parent := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ child := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ child.SetParent(parent)
+ idParent, _ := parent.ID()
+ child.SetParentID(idParent)
+ testutil.AddAttribute(child, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(parent, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(child, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ testutil.AddAttribute(parent, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: child,
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info("complex objects generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // simple objects deleted by gc marks
+ for i := 0; i < deletedByGCMarksCount; i++ {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ _, err = db.Inhume(ctx, InhumePrm{
+ target: []oid.Address{object.AddressOf(obj)},
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info("simple objects deleted by gc marks generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(10000)
+ // simple objects deleted by tombstones
+ for i := 0; i < deletedByTombstoneCount; i++ {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ tomb := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ tomb.SetType(objectSDK.TypeTombstone)
+ _, err = db.Put(ctx, PutPrm{
+ obj: tomb,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ tombAddr := object.AddressOf(tomb)
+ _, err = db.Inhume(ctx, InhumePrm{
+ target: []oid.Address{object.AddressOf(obj)},
+ tomb: &tombAddr,
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info("simple objects deleted by tombstones generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // simple objects locked by locks
+ for i := 0; i < lockedCount; i++ {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ lock := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ lock.SetType(objectSDK.TypeLock)
+ testutil.AddAttribute(lock, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ _, err = db.Put(ctx, PutPrm{
+ obj: lock,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ err = db.Lock(ctx, containers[i%len(containers)], object.AddressOf(lock).Object(), []oid.ID{object.AddressOf(obj).Object()})
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info("simple objects locked by locks generated")
+ require.NoError(t, db.boltDB.Sync())
+ require.NoError(t, db.Close())
+}
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index 9134616fe..eef7210dc 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -94,11 +94,13 @@ const (
// ownerPrefix was used for prefixing FKBT index buckets mapping owner to object IDs.
// Key: owner ID
// Value: bucket containing object IDs as keys
- _
+ // removed in version 3
+ ownerPrefix
// userAttributePrefix was used for prefixing FKBT index buckets containing objects.
// Key: attribute value
// Value: bucket containing object IDs as keys
- _
+ // removed in version 3
+ userAttributePrefix
// ====================
// List index buckets.
@@ -107,7 +109,8 @@ const (
// payloadHashPrefix was used for prefixing List index buckets mapping payload hash to a list of object IDs.
// Key: payload hash
// Value: list of object IDs
- _
+ // removed in version 3
+ payloadHashPrefix
// parentPrefix is used for prefixing List index buckets mapping parent ID to a list of children IDs.
// Key: parent ID
// Value: list of object IDs
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index bb2b66d9b..9e15babbc 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -2,6 +2,7 @@ package meta
import (
"encoding/binary"
+ "errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -18,6 +19,8 @@ var versionKey = []byte("version")
// the current code version.
var ErrOutdatedVersion = logicerr.New("invalid version, resynchronization is required")
+var errVersionUndefinedNoInfoBucket = errors.New("version undefined: no info bucket")
+
func checkVersion(tx *bbolt.Tx, initialized bool) error {
var knownVersion bool
@@ -59,3 +62,15 @@ func updateVersion(tx *bbolt.Tx, version uint64) error {
}
return b.Put(versionKey, data)
}
+
+func currentVersion(tx *bbolt.Tx) (uint64, error) {
+ b := tx.Bucket(shardInfoBucket)
+ if b == nil {
+ return 0, errVersionUndefinedNoInfoBucket
+ }
+ data := b.Get(versionKey)
+ if len(data) != 8 {
+ return 0, fmt.Errorf("version undefined: invalid version data length %d", len(data))
+ }
+ return binary.LittleEndian.Uint64(data), nil
+}
From 882c068410628e4e8a7fe929189e97f4e796a5d1 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 29 Aug 2024 16:33:30 +0300
Subject: [PATCH 066/705] [#1334] metabase: Store upgrade flag
This allows checking whether a metabase upgrade was left incomplete.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/upgrade.go | 12 +++++++++++
pkg/local_object_storage/metabase/version.go | 11 +++++++++-
.../metabase/version_test.go | 20 +++++++++++++++++++
pkg/local_object_storage/shard/control.go | 2 +-
4 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index 014e50286..a4c7707b4 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -51,9 +51,21 @@ func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any))
if !found {
return fmt.Errorf("unsupported version %d: no update available", version)
}
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(shardInfoBucket)
+ return b.Put(upgradeKey, zeroValue)
+ }); err != nil {
+		return fmt.Errorf("set upgrade key: %w", err)
+ }
if err := updater(ctx, db, log); err != nil {
return fmt.Errorf("update metabase schema: %w", err)
}
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(shardInfoBucket)
+ return b.Delete(upgradeKey)
+ }); err != nil {
+		return fmt.Errorf("delete upgrade key: %w", err)
+ }
if compact {
log("compacting metabase...")
err := compactDB(db)
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index 9e15babbc..048bb9af6 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -12,13 +12,18 @@ import (
// version contains current metabase version.
const version = 3
-var versionKey = []byte("version")
+var (
+ versionKey = []byte("version")
+ upgradeKey = []byte("upgrade")
+)
// ErrOutdatedVersion is returned on initializing
// an existing metabase that is not compatible with
// the current code version.
var ErrOutdatedVersion = logicerr.New("invalid version, resynchronization is required")
+var ErrIncompletedUpgrade = logicerr.New("metabase upgrade is not completed")
+
var errVersionUndefinedNoInfoBucket = errors.New("version undefined: no info bucket")
func checkVersion(tx *bbolt.Tx, initialized bool) error {
@@ -35,6 +40,10 @@ func checkVersion(tx *bbolt.Tx, initialized bool) error {
return fmt.Errorf("%w: expected=%d, stored=%d", ErrOutdatedVersion, version, stored)
}
}
+ data = b.Get(upgradeKey)
+ if len(data) > 0 {
+ return ErrIncompletedUpgrade
+ }
}
if !initialized {
diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go
index b2af428ff..75229a1b4 100644
--- a/pkg/local_object_storage/metabase/version_test.go
+++ b/pkg/local_object_storage/metabase/version_test.go
@@ -84,4 +84,24 @@ func TestVersion(t *testing.T) {
require.NoError(t, db.Close())
})
})
+ t.Run("incompleted upgrade", func(t *testing.T) {
+ db := newDB(t)
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue)
+ }))
+ require.ErrorIs(t, db.Init(), ErrIncompletedUpgrade)
+ require.NoError(t, db.Close())
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return tx.Bucket(shardInfoBucket).Delete(upgradeKey)
+ }))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
+ })
}
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 936a506c0..1626d5804 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -171,7 +171,7 @@ func (s *Shard) initializeComponents(m mode.Mode) error {
for _, component := range components {
if err := component.Init(); err != nil {
if component == s.metaBase {
- if errors.Is(err, meta.ErrOutdatedVersion) {
+ if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) {
return fmt.Errorf("metabase initialization: %w", err)
}
From 98fe24cdb7347b9b4ddfb1c8fe5e2025465d7692 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 29 Aug 2024 15:45:33 +0300
Subject: [PATCH 067/705] [#1343] go.mod: Update api-go
Signed-off-by: Evgenii Stratonikov
---
.../modules/control/shards_list.go | 6 +-
cmd/frostfs-cli/modules/tree/add.go | 6 +-
cmd/frostfs-cli/modules/tree/move.go | 2 +-
go.mod | 2 +-
go.sum | 4 +-
pkg/services/control/server/list_shards.go | 10 +-
pkg/services/control/server/policy_engine.go | 4 +-
.../control/server/seal_writecache.go | 4 +-
pkg/services/control/service_frostfs.pb.go | 66 +++----
pkg/services/control/types_frostfs.pb.go | 86 ++++-----
pkg/services/tree/service.go | 22 +--
pkg/services/tree/service_frostfs.pb.go | 172 ++++++++----------
pkg/services/tree/signature_test.go | 2 +-
13 files changed, 180 insertions(+), 206 deletions(-)
diff --git a/cmd/frostfs-cli/modules/control/shards_list.go b/cmd/frostfs-cli/modules/control/shards_list.go
index 07c5bcd9a..e9e49bb29 100644
--- a/cmd/frostfs-cli/modules/control/shards_list.go
+++ b/cmd/frostfs-cli/modules/control/shards_list.go
@@ -61,7 +61,7 @@ func listShards(cmd *cobra.Command, _ []string) {
}
}
-func prettyPrintShardsJSON(cmd *cobra.Command, ii []*control.ShardInfo) {
+func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) {
out := make([]map[string]any, 0, len(ii))
for _, i := range ii {
out = append(out, map[string]any{
@@ -83,7 +83,7 @@ func prettyPrintShardsJSON(cmd *cobra.Command, ii []*control.ShardInfo) {
cmd.Print(buf.String()) // pretty printer emits newline, so no need for Println
}
-func prettyPrintShards(cmd *cobra.Command, ii []*control.ShardInfo) {
+func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) {
for _, i := range ii {
pathPrinter := func(name, path string) string {
if path == "" {
@@ -121,7 +121,7 @@ func shardModeToString(m control.ShardMode) string {
return "unknown"
}
-func sortShardsByID(ii []*control.ShardInfo) {
+func sortShardsByID(ii []control.ShardInfo) {
sort.Slice(ii, func(i, j int) bool {
return bytes.Compare(ii[i].GetShard_ID(), ii[j].GetShard_ID()) < 0
})
diff --git a/cmd/frostfs-cli/modules/tree/add.go b/cmd/frostfs-cli/modules/tree/add.go
index 068b1d185..0b8dc292f 100644
--- a/cmd/frostfs-cli/modules/tree/add.go
+++ b/cmd/frostfs-cli/modules/tree/add.go
@@ -77,13 +77,13 @@ func add(cmd *cobra.Command, _ []string) {
cmd.Println("Node ID: ", resp.GetBody().GetNodeId())
}
-func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) {
+func parseMeta(cmd *cobra.Command) ([]tree.KeyValue, error) {
raws, _ := cmd.Flags().GetStringSlice(metaFlagKey)
if len(raws) == 0 {
return nil, nil
}
- pairs := make([]*tree.KeyValue, 0, len(raws))
+ pairs := make([]tree.KeyValue, 0, len(raws))
for i := range raws {
k, v, found := strings.Cut(raws[i], "=")
if !found {
@@ -94,7 +94,7 @@ func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) {
pair.Key = k
pair.Value = []byte(v)
- pairs = append(pairs, &pair)
+ pairs = append(pairs, pair)
}
return pairs, nil
diff --git a/cmd/frostfs-cli/modules/tree/move.go b/cmd/frostfs-cli/modules/tree/move.go
index 95516940c..24abbd650 100644
--- a/cmd/frostfs-cli/modules/tree/move.go
+++ b/cmd/frostfs-cli/modules/tree/move.go
@@ -75,7 +75,7 @@ func move(cmd *cobra.Command, _ []string) {
resp, err := cli.GetSubTree(ctx, subTreeReq)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
- var meta []*tree.KeyValue
+ var meta []tree.KeyValue
subtreeResp, err := resp.Recv()
for ; err == nil; subtreeResp, err = resp.Recv() {
meta = subtreeResp.GetBody().GetMeta()
diff --git a/go.mod b/go.mod
index b7f59c823..358370201 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
diff --git a/go.sum b/go.sum
index d2f926151..be82bff70 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac h1:Gu3oiPnsSZPgwsUYs2f3xTQwndM/OWM/zo3zbN4rOb8=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61 h1:bw9EVGWnfY9awFb5XYR52AGbzgg3o04gZF66yHob48c=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
diff --git a/pkg/services/control/server/list_shards.go b/pkg/services/control/server/list_shards.go
index b639245c1..56bd9fc1f 100644
--- a/pkg/services/control/server/list_shards.go
+++ b/pkg/services/control/server/list_shards.go
@@ -25,7 +25,7 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
info := s.s.DumpInfo()
- shardInfos := make([]*control.ShardInfo, 0, len(info.Shards))
+ shardInfos := make([]control.ShardInfo, 0, len(info.Shards))
for _, sh := range info.Shards {
si := new(control.ShardInfo)
@@ -54,7 +54,7 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
si.SetMode(m)
si.SetErrorCount(sh.ErrorCount)
- shardInfos = append(shardInfos, si)
+ shardInfos = append(shardInfos, *si)
}
body.SetShards(shardInfos)
@@ -67,10 +67,10 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
return resp, nil
}
-func blobstorInfoToProto(info blobstor.Info) []*control.BlobstorInfo {
- res := make([]*control.BlobstorInfo, len(info.SubStorages))
+func blobstorInfoToProto(info blobstor.Info) []control.BlobstorInfo {
+ res := make([]control.BlobstorInfo, len(info.SubStorages))
for i := range info.SubStorages {
- res[i] = &control.BlobstorInfo{
+ res[i] = control.BlobstorInfo{
Path: info.SubStorages[i].Path,
Type: info.SubStorages[i].Type,
}
diff --git a/pkg/services/control/server/policy_engine.go b/pkg/services/control/server/policy_engine.go
index 98daac8a6..ab8258e27 100644
--- a/pkg/services/control/server/policy_engine.go
+++ b/pkg/services/control/server/policy_engine.go
@@ -220,13 +220,13 @@ func (s *Server) ListTargetsLocalOverrides(_ context.Context, req *control.ListT
if err != nil {
return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
}
- targets := make([]*control.ChainTarget, 0, len(apeTargets))
+ targets := make([]control.ChainTarget, 0, len(apeTargets))
for i := range apeTargets {
target, err := controlTarget(&apeTargets[i])
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
- targets = append(targets, &target)
+ targets = append(targets, target)
}
resp := &control.ListTargetsLocalOverridesResponse{
diff --git a/pkg/services/control/server/seal_writecache.go b/pkg/services/control/server/seal_writecache.go
index 1737677b7..6799bdcac 100644
--- a/pkg/services/control/server/seal_writecache.go
+++ b/pkg/services/control/server/seal_writecache.go
@@ -32,12 +32,12 @@ func (s *Server) SealWriteCache(ctx context.Context, req *control.SealWriteCache
resp := &control.SealWriteCacheResponse{Body: &control.SealWriteCacheResponse_Body{}}
for _, r := range res.ShardResults {
if r.Success {
- resp.Body.Results = append(resp.GetBody().GetResults(), &control.SealWriteCacheResponse_Body_Status{
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{
Shard_ID: *r.ShardID,
Success: true,
})
} else {
- resp.Body.Results = append(resp.GetBody().GetResults(), &control.SealWriteCacheResponse_Body_Status{
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{
Shard_ID: *r.ShardID,
Error: r.ErrorMsg,
})
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index a446c5e59..eb0d95c64 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -2851,7 +2851,7 @@ func (x *ListShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type ListShardsResponse_Body struct {
- Shards []*ShardInfo `json:"shards"`
+ Shards []ShardInfo `json:"shards"`
}
var (
@@ -2869,7 +2869,7 @@ func (x *ListShardsResponse_Body) StableSize() (size int) {
return 0
}
for i := range x.Shards {
- size += proto.NestedStructureSize(1, x.Shards[i])
+ size += proto.NestedStructureSizeUnchecked(1, &x.Shards[i])
}
return size
}
@@ -2888,9 +2888,7 @@ func (x *ListShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
return
}
for i := range x.Shards {
- if x.Shards[i] != nil {
- x.Shards[i].EmitProtobuf(mm.AppendMessage(1))
- }
+ x.Shards[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -2908,8 +2906,8 @@ func (x *ListShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Shards")
}
- x.Shards = append(x.Shards, new(ShardInfo))
- ff := x.Shards[len(x.Shards)-1]
+ x.Shards = append(x.Shards, ShardInfo{})
+ ff := &x.Shards[len(x.Shards)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -2917,13 +2915,13 @@ func (x *ListShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
}
return nil
}
-func (x *ListShardsResponse_Body) GetShards() []*ShardInfo {
+func (x *ListShardsResponse_Body) GetShards() []ShardInfo {
if x != nil {
return x.Shards
}
return nil
}
-func (x *ListShardsResponse_Body) SetShards(v []*ShardInfo) {
+func (x *ListShardsResponse_Body) SetShards(v []ShardInfo) {
x.Shards = v
}
@@ -2981,11 +2979,11 @@ func (x *ListShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
switch key {
case "shards":
{
- var f *ShardInfo
- var list []*ShardInfo
+ var f ShardInfo
+ var list []ShardInfo
in.Delim('[')
for !in.IsDelim(']') {
- f = new(ShardInfo)
+ f = ShardInfo{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -11870,7 +11868,7 @@ func (x *ListTargetsLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type ListTargetsLocalOverridesResponse_Body struct {
- Targets []*ChainTarget `json:"targets"`
+ Targets []ChainTarget `json:"targets"`
}
var (
@@ -11888,7 +11886,7 @@ func (x *ListTargetsLocalOverridesResponse_Body) StableSize() (size int) {
return 0
}
for i := range x.Targets {
- size += proto.NestedStructureSize(1, x.Targets[i])
+ size += proto.NestedStructureSizeUnchecked(1, &x.Targets[i])
}
return size
}
@@ -11907,9 +11905,7 @@ func (x *ListTargetsLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.Mess
return
}
for i := range x.Targets {
- if x.Targets[i] != nil {
- x.Targets[i].EmitProtobuf(mm.AppendMessage(1))
- }
+ x.Targets[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -11927,8 +11923,8 @@ func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Targets")
}
- x.Targets = append(x.Targets, new(ChainTarget))
- ff := x.Targets[len(x.Targets)-1]
+ x.Targets = append(x.Targets, ChainTarget{})
+ ff := &x.Targets[len(x.Targets)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -11936,13 +11932,13 @@ func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (
}
return nil
}
-func (x *ListTargetsLocalOverridesResponse_Body) GetTargets() []*ChainTarget {
+func (x *ListTargetsLocalOverridesResponse_Body) GetTargets() []ChainTarget {
if x != nil {
return x.Targets
}
return nil
}
-func (x *ListTargetsLocalOverridesResponse_Body) SetTargets(v []*ChainTarget) {
+func (x *ListTargetsLocalOverridesResponse_Body) SetTargets(v []ChainTarget) {
x.Targets = v
}
@@ -12000,11 +11996,11 @@ func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Le
switch key {
case "targets":
{
- var f *ChainTarget
- var list []*ChainTarget
+ var f ChainTarget
+ var list []ChainTarget
in.Delim('[')
for !in.IsDelim(']') {
- f = new(ChainTarget)
+ f = ChainTarget{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -14083,7 +14079,7 @@ func (x *SealWriteCacheResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer)
}
type SealWriteCacheResponse_Body struct {
- Results []*SealWriteCacheResponse_Body_Status `json:"results"`
+ Results []SealWriteCacheResponse_Body_Status `json:"results"`
}
var (
@@ -14101,7 +14097,7 @@ func (x *SealWriteCacheResponse_Body) StableSize() (size int) {
return 0
}
for i := range x.Results {
- size += proto.NestedStructureSize(1, x.Results[i])
+ size += proto.NestedStructureSizeUnchecked(1, &x.Results[i])
}
return size
}
@@ -14120,9 +14116,7 @@ func (x *SealWriteCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshale
return
}
for i := range x.Results {
- if x.Results[i] != nil {
- x.Results[i].EmitProtobuf(mm.AppendMessage(1))
- }
+ x.Results[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -14140,8 +14134,8 @@ func (x *SealWriteCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error)
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Results")
}
- x.Results = append(x.Results, new(SealWriteCacheResponse_Body_Status))
- ff := x.Results[len(x.Results)-1]
+ x.Results = append(x.Results, SealWriteCacheResponse_Body_Status{})
+ ff := &x.Results[len(x.Results)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -14149,13 +14143,13 @@ func (x *SealWriteCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error)
}
return nil
}
-func (x *SealWriteCacheResponse_Body) GetResults() []*SealWriteCacheResponse_Body_Status {
+func (x *SealWriteCacheResponse_Body) GetResults() []SealWriteCacheResponse_Body_Status {
if x != nil {
return x.Results
}
return nil
}
-func (x *SealWriteCacheResponse_Body) SetResults(v []*SealWriteCacheResponse_Body_Status) {
+func (x *SealWriteCacheResponse_Body) SetResults(v []SealWriteCacheResponse_Body_Status) {
x.Results = v
}
@@ -14213,11 +14207,11 @@ func (x *SealWriteCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
switch key {
case "results":
{
- var f *SealWriteCacheResponse_Body_Status
- var list []*SealWriteCacheResponse_Body_Status
+ var f SealWriteCacheResponse_Body_Status
+ var list []SealWriteCacheResponse_Body_Status
in.Delim('[')
for !in.IsDelim(']') {
- f = new(SealWriteCacheResponse_Body_Status)
+ f = SealWriteCacheResponse_Body_Status{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go
index 3cc37245f..42c1afa52 100644
--- a/pkg/services/control/types_frostfs.pb.go
+++ b/pkg/services/control/types_frostfs.pb.go
@@ -500,10 +500,10 @@ func (x *NodeInfo_Attribute) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type NodeInfo struct {
- PublicKey []byte `json:"publicKey"`
- Addresses []string `json:"addresses"`
- Attributes []*NodeInfo_Attribute `json:"attributes"`
- State NetmapStatus `json:"state"`
+ PublicKey []byte `json:"publicKey"`
+ Addresses []string `json:"addresses"`
+ Attributes []NodeInfo_Attribute `json:"attributes"`
+ State NetmapStatus `json:"state"`
}
var (
@@ -523,7 +523,7 @@ func (x *NodeInfo) StableSize() (size int) {
size += proto.BytesSize(1, x.PublicKey)
size += proto.RepeatedStringSize(2, x.Addresses)
for i := range x.Attributes {
- size += proto.NestedStructureSize(3, x.Attributes[i])
+ size += proto.NestedStructureSizeUnchecked(3, &x.Attributes[i])
}
size += proto.EnumSize(4, int32(x.State))
return size
@@ -549,9 +549,7 @@ func (x *NodeInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(2, x.Addresses[j])
}
for i := range x.Attributes {
- if x.Attributes[i] != nil {
- x.Attributes[i].EmitProtobuf(mm.AppendMessage(3))
- }
+ x.Attributes[i].EmitProtobuf(mm.AppendMessage(3))
}
if int32(x.State) != 0 {
mm.AppendInt32(4, int32(x.State))
@@ -584,8 +582,8 @@ func (x *NodeInfo) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Attributes")
}
- x.Attributes = append(x.Attributes, new(NodeInfo_Attribute))
- ff := x.Attributes[len(x.Attributes)-1]
+ x.Attributes = append(x.Attributes, NodeInfo_Attribute{})
+ ff := &x.Attributes[len(x.Attributes)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -617,13 +615,13 @@ func (x *NodeInfo) GetAddresses() []string {
func (x *NodeInfo) SetAddresses(v []string) {
x.Addresses = v
}
-func (x *NodeInfo) GetAttributes() []*NodeInfo_Attribute {
+func (x *NodeInfo) GetAttributes() []NodeInfo_Attribute {
if x != nil {
return x.Attributes
}
return nil
}
-func (x *NodeInfo) SetAttributes(v []*NodeInfo_Attribute) {
+func (x *NodeInfo) SetAttributes(v []NodeInfo_Attribute) {
x.Attributes = v
}
func (x *NodeInfo) GetState() NetmapStatus {
@@ -731,11 +729,11 @@ func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "attributes":
{
- var f *NodeInfo_Attribute
- var list []*NodeInfo_Attribute
+ var f NodeInfo_Attribute
+ var list []NodeInfo_Attribute
in.Delim('[')
for !in.IsDelim(']') {
- f = new(NodeInfo_Attribute)
+ f = NodeInfo_Attribute{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -775,8 +773,8 @@ func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type Netmap struct {
- Epoch uint64 `json:"epoch"`
- Nodes []*NodeInfo `json:"nodes"`
+ Epoch uint64 `json:"epoch"`
+ Nodes []NodeInfo `json:"nodes"`
}
var (
@@ -795,7 +793,7 @@ func (x *Netmap) StableSize() (size int) {
}
size += proto.UInt64Size(1, x.Epoch)
for i := range x.Nodes {
- size += proto.NestedStructureSize(2, x.Nodes[i])
+ size += proto.NestedStructureSizeUnchecked(2, &x.Nodes[i])
}
return size
}
@@ -817,9 +815,7 @@ func (x *Netmap) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(1, x.Epoch)
}
for i := range x.Nodes {
- if x.Nodes[i] != nil {
- x.Nodes[i].EmitProtobuf(mm.AppendMessage(2))
- }
+ x.Nodes[i].EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -843,8 +839,8 @@ func (x *Netmap) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Nodes")
}
- x.Nodes = append(x.Nodes, new(NodeInfo))
- ff := x.Nodes[len(x.Nodes)-1]
+ x.Nodes = append(x.Nodes, NodeInfo{})
+ ff := &x.Nodes[len(x.Nodes)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -861,13 +857,13 @@ func (x *Netmap) GetEpoch() uint64 {
func (x *Netmap) SetEpoch(v uint64) {
x.Epoch = v
}
-func (x *Netmap) GetNodes() []*NodeInfo {
+func (x *Netmap) GetNodes() []NodeInfo {
if x != nil {
return x.Nodes
}
return nil
}
-func (x *Netmap) SetNodes(v []*NodeInfo) {
+func (x *Netmap) SetNodes(v []NodeInfo) {
x.Nodes = v
}
@@ -936,11 +932,11 @@ func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "nodes":
{
- var f *NodeInfo
- var list []*NodeInfo
+ var f NodeInfo
+ var list []NodeInfo
in.Delim('[')
for !in.IsDelim(']') {
- f = new(NodeInfo)
+ f = NodeInfo{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -958,13 +954,13 @@ func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type ShardInfo struct {
- Shard_ID []byte `json:"shardID"`
- MetabasePath string `json:"metabasePath"`
- Blobstor []*BlobstorInfo `json:"blobstor"`
- WritecachePath string `json:"writecachePath"`
- Mode ShardMode `json:"mode"`
- ErrorCount uint32 `json:"errorCount"`
- PiloramaPath string `json:"piloramaPath"`
+ Shard_ID []byte `json:"shardID"`
+ MetabasePath string `json:"metabasePath"`
+ Blobstor []BlobstorInfo `json:"blobstor"`
+ WritecachePath string `json:"writecachePath"`
+ Mode ShardMode `json:"mode"`
+ ErrorCount uint32 `json:"errorCount"`
+ PiloramaPath string `json:"piloramaPath"`
}
var (
@@ -984,7 +980,7 @@ func (x *ShardInfo) StableSize() (size int) {
size += proto.BytesSize(1, x.Shard_ID)
size += proto.StringSize(2, x.MetabasePath)
for i := range x.Blobstor {
- size += proto.NestedStructureSize(3, x.Blobstor[i])
+ size += proto.NestedStructureSizeUnchecked(3, &x.Blobstor[i])
}
size += proto.StringSize(4, x.WritecachePath)
size += proto.EnumSize(5, int32(x.Mode))
@@ -1013,9 +1009,7 @@ func (x *ShardInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(2, x.MetabasePath)
}
for i := range x.Blobstor {
- if x.Blobstor[i] != nil {
- x.Blobstor[i].EmitProtobuf(mm.AppendMessage(3))
- }
+ x.Blobstor[i].EmitProtobuf(mm.AppendMessage(3))
}
if len(x.WritecachePath) != 0 {
mm.AppendString(4, x.WritecachePath)
@@ -1057,8 +1051,8 @@ func (x *ShardInfo) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Blobstor")
}
- x.Blobstor = append(x.Blobstor, new(BlobstorInfo))
- ff := x.Blobstor[len(x.Blobstor)-1]
+ x.Blobstor = append(x.Blobstor, BlobstorInfo{})
+ ff := &x.Blobstor[len(x.Blobstor)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -1108,13 +1102,13 @@ func (x *ShardInfo) GetMetabasePath() string {
func (x *ShardInfo) SetMetabasePath(v string) {
x.MetabasePath = v
}
-func (x *ShardInfo) GetBlobstor() []*BlobstorInfo {
+func (x *ShardInfo) GetBlobstor() []BlobstorInfo {
if x != nil {
return x.Blobstor
}
return nil
}
-func (x *ShardInfo) SetBlobstor(v []*BlobstorInfo) {
+func (x *ShardInfo) SetBlobstor(v []BlobstorInfo) {
x.Blobstor = v
}
func (x *ShardInfo) GetWritecachePath() string {
@@ -1250,11 +1244,11 @@ func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "blobstor":
{
- var f *BlobstorInfo
- var list []*BlobstorInfo
+ var f BlobstorInfo
+ var list []BlobstorInfo
in.Delim('[')
for !in.IsDelim(']') {
- f = new(BlobstorInfo)
+ f = BlobstorInfo{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 2012f53d2..4da61617f 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -360,7 +360,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
return nil, err
}
- info := make([]*GetNodeByPathResponse_Info, 0, len(nodes))
+ info := make([]GetNodeByPathResponse_Info, 0, len(nodes))
for _, node := range nodes {
m, parent, err := s.forest.TreeGetMeta(ctx, cid, b.GetTreeId(), node)
if err != nil {
@@ -374,11 +374,11 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
if b.GetAllAttributes() {
x.Meta = metaToProto(m.Items)
} else {
- var metaValue []*KeyValue
+ var metaValue []KeyValue
for _, kv := range m.Items {
for _, attr := range b.GetAttributes() {
if kv.Key == attr {
- metaValue = append(metaValue, &KeyValue{
+ metaValue = append(metaValue, KeyValue{
Key: kv.Key,
Value: kv.Value,
})
@@ -388,7 +388,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
}
x.Meta = metaValue
}
- info = append(info, &x)
+ info = append(info, x)
}
return &GetNodeByPathResponse{
@@ -782,21 +782,19 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList
}, nil
}
-func protoToMeta(arr []*KeyValue) []pilorama.KeyValue {
+func protoToMeta(arr []KeyValue) []pilorama.KeyValue {
meta := make([]pilorama.KeyValue, len(arr))
for i, kv := range arr {
- if kv != nil {
- meta[i].Key = kv.GetKey()
- meta[i].Value = kv.GetValue()
- }
+ meta[i].Key = kv.GetKey()
+ meta[i].Value = kv.GetValue()
}
return meta
}
-func metaToProto(arr []pilorama.KeyValue) []*KeyValue {
- meta := make([]*KeyValue, len(arr))
+func metaToProto(arr []pilorama.KeyValue) []KeyValue {
+ meta := make([]KeyValue, len(arr))
for i, kv := range arr {
- meta[i] = &KeyValue{
+ meta[i] = KeyValue{
Key: kv.Key,
Value: kv.Value,
}
diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go
index 3c6ba21b7..7b6abb1dd 100644
--- a/pkg/services/tree/service_frostfs.pb.go
+++ b/pkg/services/tree/service_frostfs.pb.go
@@ -16,11 +16,11 @@ import (
)
type AddRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- ParentId uint64 `json:"parentId"`
- Meta []*KeyValue `json:"meta"`
- BearerToken []byte `json:"bearerToken"`
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ ParentId uint64 `json:"parentId"`
+ Meta []KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
}
var (
@@ -41,7 +41,7 @@ func (x *AddRequest_Body) StableSize() (size int) {
size += proto.StringSize(2, x.TreeId)
size += proto.UInt64Size(3, x.ParentId)
for i := range x.Meta {
- size += proto.NestedStructureSize(4, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i])
}
size += proto.BytesSize(5, x.BearerToken)
return size
@@ -70,9 +70,7 @@ func (x *AddRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(3, x.ParentId)
}
for i := range x.Meta {
- if x.Meta[i] != nil {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
- }
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
}
if len(x.BearerToken) != 0 {
mm.AppendBytes(5, x.BearerToken)
@@ -111,8 +109,8 @@ func (x *AddRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Meta")
}
- x.Meta = append(x.Meta, new(KeyValue))
- ff := x.Meta[len(x.Meta)-1]
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -153,13 +151,13 @@ func (x *AddRequest_Body) GetParentId() uint64 {
func (x *AddRequest_Body) SetParentId(v uint64) {
x.ParentId = v
}
-func (x *AddRequest_Body) GetMeta() []*KeyValue {
+func (x *AddRequest_Body) GetMeta() []KeyValue {
if x != nil {
return x.Meta
}
return nil
}
-func (x *AddRequest_Body) SetMeta(v []*KeyValue) {
+func (x *AddRequest_Body) SetMeta(v []KeyValue) {
x.Meta = v
}
func (x *AddRequest_Body) GetBearerToken() []byte {
@@ -264,11 +262,11 @@ func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "meta":
{
- var f *KeyValue
- var list []*KeyValue
+ var f KeyValue
+ var list []KeyValue
in.Delim('[')
for !in.IsDelim(']') {
- f = new(KeyValue)
+ f = KeyValue{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -793,12 +791,12 @@ func (x *AddResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type AddByPathRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- PathAttribute string `json:"pathAttribute"`
- Path []string `json:"path"`
- Meta []*KeyValue `json:"meta"`
- BearerToken []byte `json:"bearerToken"`
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ PathAttribute string `json:"pathAttribute"`
+ Path []string `json:"path"`
+ Meta []KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
}
var (
@@ -820,7 +818,7 @@ func (x *AddByPathRequest_Body) StableSize() (size int) {
size += proto.StringSize(3, x.PathAttribute)
size += proto.RepeatedStringSize(4, x.Path)
for i := range x.Meta {
- size += proto.NestedStructureSize(5, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i])
}
size += proto.BytesSize(6, x.BearerToken)
return size
@@ -852,9 +850,7 @@ func (x *AddByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(4, x.Path[j])
}
for i := range x.Meta {
- if x.Meta[i] != nil {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
- }
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
}
if len(x.BearerToken) != 0 {
mm.AppendBytes(6, x.BearerToken)
@@ -899,8 +895,8 @@ func (x *AddByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Meta")
}
- x.Meta = append(x.Meta, new(KeyValue))
- ff := x.Meta[len(x.Meta)-1]
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -950,13 +946,13 @@ func (x *AddByPathRequest_Body) GetPath() []string {
func (x *AddByPathRequest_Body) SetPath(v []string) {
x.Path = v
}
-func (x *AddByPathRequest_Body) GetMeta() []*KeyValue {
+func (x *AddByPathRequest_Body) GetMeta() []KeyValue {
if x != nil {
return x.Meta
}
return nil
}
-func (x *AddByPathRequest_Body) SetMeta(v []*KeyValue) {
+func (x *AddByPathRequest_Body) SetMeta(v []KeyValue) {
x.Meta = v
}
func (x *AddByPathRequest_Body) GetBearerToken() []byte {
@@ -1086,11 +1082,11 @@ func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "meta":
{
- var f *KeyValue
- var list []*KeyValue
+ var f KeyValue
+ var list []KeyValue
in.Delim('[')
for !in.IsDelim(']') {
- f = new(KeyValue)
+ f = KeyValue{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -2351,12 +2347,12 @@ func (x *RemoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type MoveRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- ParentId uint64 `json:"parentId"`
- NodeId uint64 `json:"nodeId"`
- Meta []*KeyValue `json:"meta"`
- BearerToken []byte `json:"bearerToken"`
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ ParentId uint64 `json:"parentId"`
+ NodeId uint64 `json:"nodeId"`
+ Meta []KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
}
var (
@@ -2378,7 +2374,7 @@ func (x *MoveRequest_Body) StableSize() (size int) {
size += proto.UInt64Size(3, x.ParentId)
size += proto.UInt64Size(4, x.NodeId)
for i := range x.Meta {
- size += proto.NestedStructureSize(5, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i])
}
size += proto.BytesSize(6, x.BearerToken)
return size
@@ -2410,9 +2406,7 @@ func (x *MoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(4, x.NodeId)
}
for i := range x.Meta {
- if x.Meta[i] != nil {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
- }
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
}
if len(x.BearerToken) != 0 {
mm.AppendBytes(6, x.BearerToken)
@@ -2457,8 +2451,8 @@ func (x *MoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Meta")
}
- x.Meta = append(x.Meta, new(KeyValue))
- ff := x.Meta[len(x.Meta)-1]
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -2508,13 +2502,13 @@ func (x *MoveRequest_Body) GetNodeId() uint64 {
func (x *MoveRequest_Body) SetNodeId(v uint64) {
x.NodeId = v
}
-func (x *MoveRequest_Body) GetMeta() []*KeyValue {
+func (x *MoveRequest_Body) GetMeta() []KeyValue {
if x != nil {
return x.Meta
}
return nil
}
-func (x *MoveRequest_Body) SetMeta(v []*KeyValue) {
+func (x *MoveRequest_Body) SetMeta(v []KeyValue) {
x.Meta = v
}
func (x *MoveRequest_Body) GetBearerToken() []byte {
@@ -2630,11 +2624,11 @@ func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "meta":
{
- var f *KeyValue
- var list []*KeyValue
+ var f KeyValue
+ var list []KeyValue
in.Delim('[')
for !in.IsDelim(']') {
- f = new(KeyValue)
+ f = KeyValue{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -3685,10 +3679,10 @@ func (x *GetNodeByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type GetNodeByPathResponse_Info struct {
- NodeId uint64 `json:"nodeId"`
- Timestamp uint64 `json:"timestamp"`
- Meta []*KeyValue `json:"meta"`
- ParentId uint64 `json:"parentId"`
+ NodeId uint64 `json:"nodeId"`
+ Timestamp uint64 `json:"timestamp"`
+ Meta []KeyValue `json:"meta"`
+ ParentId uint64 `json:"parentId"`
}
var (
@@ -3708,7 +3702,7 @@ func (x *GetNodeByPathResponse_Info) StableSize() (size int) {
size += proto.UInt64Size(1, x.NodeId)
size += proto.UInt64Size(2, x.Timestamp)
for i := range x.Meta {
- size += proto.NestedStructureSize(3, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(3, &x.Meta[i])
}
size += proto.UInt64Size(4, x.ParentId)
return size
@@ -3734,9 +3728,7 @@ func (x *GetNodeByPathResponse_Info) EmitProtobuf(mm *easyproto.MessageMarshaler
mm.AppendUint64(2, x.Timestamp)
}
for i := range x.Meta {
- if x.Meta[i] != nil {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(3))
- }
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(3))
}
if x.ParentId != 0 {
mm.AppendUint64(4, x.ParentId)
@@ -3769,8 +3761,8 @@ func (x *GetNodeByPathResponse_Info) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Meta")
}
- x.Meta = append(x.Meta, new(KeyValue))
- ff := x.Meta[len(x.Meta)-1]
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -3802,13 +3794,13 @@ func (x *GetNodeByPathResponse_Info) GetTimestamp() uint64 {
func (x *GetNodeByPathResponse_Info) SetTimestamp(v uint64) {
x.Timestamp = v
}
-func (x *GetNodeByPathResponse_Info) GetMeta() []*KeyValue {
+func (x *GetNodeByPathResponse_Info) GetMeta() []KeyValue {
if x != nil {
return x.Meta
}
return nil
}
-func (x *GetNodeByPathResponse_Info) SetMeta(v []*KeyValue) {
+func (x *GetNodeByPathResponse_Info) SetMeta(v []KeyValue) {
x.Meta = v
}
func (x *GetNodeByPathResponse_Info) GetParentId() uint64 {
@@ -3902,11 +3894,11 @@ func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "meta":
{
- var f *KeyValue
- var list []*KeyValue
+ var f KeyValue
+ var list []KeyValue
in.Delim('[')
for !in.IsDelim(']') {
- f = new(KeyValue)
+ f = KeyValue{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -3930,7 +3922,7 @@ func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type GetNodeByPathResponse_Body struct {
- Nodes []*GetNodeByPathResponse_Info `json:"nodes"`
+ Nodes []GetNodeByPathResponse_Info `json:"nodes"`
}
var (
@@ -3948,7 +3940,7 @@ func (x *GetNodeByPathResponse_Body) StableSize() (size int) {
return 0
}
for i := range x.Nodes {
- size += proto.NestedStructureSize(1, x.Nodes[i])
+ size += proto.NestedStructureSizeUnchecked(1, &x.Nodes[i])
}
return size
}
@@ -3967,9 +3959,7 @@ func (x *GetNodeByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler
return
}
for i := range x.Nodes {
- if x.Nodes[i] != nil {
- x.Nodes[i].EmitProtobuf(mm.AppendMessage(1))
- }
+ x.Nodes[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -3987,8 +3977,8 @@ func (x *GetNodeByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Nodes")
}
- x.Nodes = append(x.Nodes, new(GetNodeByPathResponse_Info))
- ff := x.Nodes[len(x.Nodes)-1]
+ x.Nodes = append(x.Nodes, GetNodeByPathResponse_Info{})
+ ff := &x.Nodes[len(x.Nodes)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -3996,13 +3986,13 @@ func (x *GetNodeByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
}
return nil
}
-func (x *GetNodeByPathResponse_Body) GetNodes() []*GetNodeByPathResponse_Info {
+func (x *GetNodeByPathResponse_Body) GetNodes() []GetNodeByPathResponse_Info {
if x != nil {
return x.Nodes
}
return nil
}
-func (x *GetNodeByPathResponse_Body) SetNodes(v []*GetNodeByPathResponse_Info) {
+func (x *GetNodeByPathResponse_Body) SetNodes(v []GetNodeByPathResponse_Info) {
x.Nodes = v
}
@@ -4060,11 +4050,11 @@ func (x *GetNodeByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
switch key {
case "nodes":
{
- var f *GetNodeByPathResponse_Info
- var list []*GetNodeByPathResponse_Info
+ var f GetNodeByPathResponse_Info
+ var list []GetNodeByPathResponse_Info
in.Delim('[')
for !in.IsDelim(']') {
- f = new(GetNodeByPathResponse_Info)
+ f = GetNodeByPathResponse_Info{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -4935,10 +4925,10 @@ func (x *GetSubTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type GetSubTreeResponse_Body struct {
- NodeId []uint64 `json:"nodeId"`
- ParentId []uint64 `json:"parentId"`
- Timestamp []uint64 `json:"timestamp"`
- Meta []*KeyValue `json:"meta"`
+ NodeId []uint64 `json:"nodeId"`
+ ParentId []uint64 `json:"parentId"`
+ Timestamp []uint64 `json:"timestamp"`
+ Meta []KeyValue `json:"meta"`
}
var (
@@ -4965,7 +4955,7 @@ func (x *GetSubTreeResponse_Body) StableSize() (size int) {
size += protowire.SizeGroup(protowire.Number(3), protowire.SizeVarint(x.Timestamp[i]))
}
for i := range x.Meta {
- size += proto.NestedStructureSize(4, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i])
}
return size
}
@@ -4993,9 +4983,7 @@ func (x *GetSubTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(3, x.Timestamp[j])
}
for i := range x.Meta {
- if x.Meta[i] != nil {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
- }
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
}
}
@@ -5031,8 +5019,8 @@ func (x *GetSubTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Meta")
}
- x.Meta = append(x.Meta, new(KeyValue))
- ff := x.Meta[len(x.Meta)-1]
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -5067,13 +5055,13 @@ func (x *GetSubTreeResponse_Body) GetTimestamp() []uint64 {
func (x *GetSubTreeResponse_Body) SetTimestamp(v []uint64) {
x.Timestamp = v
}
-func (x *GetSubTreeResponse_Body) GetMeta() []*KeyValue {
+func (x *GetSubTreeResponse_Body) GetMeta() []KeyValue {
if x != nil {
return x.Meta
}
return nil
}
-func (x *GetSubTreeResponse_Body) SetMeta(v []*KeyValue) {
+func (x *GetSubTreeResponse_Body) SetMeta(v []KeyValue) {
x.Meta = v
}
@@ -5206,11 +5194,11 @@ func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "meta":
{
- var f *KeyValue
- var list []*KeyValue
+ var f KeyValue
+ var list []KeyValue
in.Delim('[')
for !in.IsDelim(']') {
- f = new(KeyValue)
+ f = KeyValue{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go
index ce5039f7c..3c3ebfe89 100644
--- a/pkg/services/tree/signature_test.go
+++ b/pkg/services/tree/signature_test.go
@@ -123,7 +123,7 @@ func TestMessageSign(t *testing.T) {
ContainerId: rawCID1,
ParentId: 1,
NodeId: 2,
- Meta: []*KeyValue{
+ Meta: []KeyValue{
{Key: "kkk", Value: []byte("vvv")},
},
},
From 2b3fc50681f7e463a74c6075ad16788fc52cfa3f Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Fri, 30 Aug 2024 14:02:57 +0300
Subject: [PATCH 068/705] [#1320] shard: Fix
TestGCDropsObjectInhumedFromWritecache flaky test
The `TestGCDropsObjectInhumedFromWritecache` test was flaky because a
running asynchronous rebuild operation prevented GC from deleting the
object. A test-only shard option `WithDisabledRebuild` has been added
to fix this.
Signed-off-by: Aleksey Savchuk
---
pkg/local_object_storage/shard/control.go | 4 ++--
pkg/local_object_storage/shard/gc_test.go | 2 +-
pkg/local_object_storage/shard/shard.go | 10 ++++++++++
3 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 1626d5804..6efe4ec37 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -130,7 +130,7 @@ func (s *Shard) Init(ctx context.Context) error {
s.gc.init(ctx)
s.rb = newRebuilder(s.rebuildLimiter)
- if !m.NoMetabase() {
+ if !m.NoMetabase() && !s.rebuildDisabled {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}
s.writecacheSealCancel.Store(dummyCancel)
@@ -398,7 +398,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
defer unlock()
s.rb.Stop(s.log)
- if !s.info.Mode.NoMetabase() {
+ if !s.info.Mode.NoMetabase() && !s.rebuildDisabled {
defer func() {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}()
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index 90958cd35..1c0ef1c2e 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -187,7 +187,7 @@ func TestGCDropsObjectInhumedFromWritecache(t *testing.T) {
func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool) {
sh := newCustomShard(t, true, shardOptions{
- additionalShardOptions: []Option{WithDisabledGC()},
+ additionalShardOptions: []Option{WithDisabledGC(), WithDisabledRebuild()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
defer func() { require.NoError(t, sh.Close()) }()
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 93f5354a7..d11bcc36b 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -139,6 +139,8 @@ type cfg struct {
reportErrorFunc func(selfID string, message string, err error)
rebuildLimiter RebuildWorkerLimiter
+
+ rebuildDisabled bool
}
func defaultCfg() *cfg {
@@ -410,6 +412,14 @@ func WithZeroCountCallback(cb EmptyContainersCallback) Option {
}
}
+// WithDisabledRebuild returns an option to disable a shard rebuild.
+// For testing purposes only.
+func WithDisabledRebuild() Option {
+ return func(c *cfg) {
+ c.rebuildDisabled = true
+ }
+}
+
func (s *Shard) fillInfo() {
s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
From a685fcdc963b0f58003059bb2dae2d21c925e25a Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Fri, 30 Aug 2024 19:20:55 +0300
Subject: [PATCH 069/705] [#1317] go.mod: Use range over int
Since Go 1.22 a "for" statement with a "range" clause is able
to iterate through integer values from zero to an upper limit.
gopatch script:
@@
var i, e expression
@@
-for i := 0; i <= e - 1; i++ {
+for i := range e {
...
}
@@
var i, e expression
@@
-for i := 0; i <= e; i++ {
+for i := range e + 1 {
...
}
@@
var i, e expression
@@
-for i := 0; i < e; i++ {
+for i := range e {
...
}
Signed-off-by: Ekaterina Lebedeva
---
.../modules/morph/contract/dump_hashes.go | 4 ++--
.../modules/morph/helper/local_client.go | 2 +-
.../internal/modules/morph/helper/util.go | 2 +-
.../morph/initialize/initialize_test.go | 4 ++--
cmd/frostfs-cli/internal/client/client.go | 2 +-
cmd/frostfs-cli/modules/container/create.go | 2 +-
cmd/frostfs-cli/modules/container/delete.go | 2 +-
cmd/frostfs-cli/modules/object/nodes.go | 2 +-
cmd/frostfs-cli/modules/util/acl.go | 2 +-
cmd/frostfs-node/config/node/config.go | 2 +-
cmd/frostfs-node/morph.go | 2 +-
pkg/innerring/indexer_test.go | 2 +-
pkg/innerring/notary.go | 2 +-
.../processors/alphabet/handlers_test.go | 6 ++---
.../processors/governance/handlers_test.go | 2 +-
.../processors/governance/list_test.go | 4 ++--
.../blobovnicza/sizes_test.go | 2 +-
.../blobovniczatree/concurrency_test.go | 2 +-
.../blobstor/blobovniczatree/rebuild_test.go | 2 +-
.../blobstor/blobstor_test.go | 12 +++++-----
.../blobstor/compression/bench_test.go | 2 +-
.../blobstor/fstree/fstree_test.go | 6 ++---
.../blobstor/perf_test.go | 4 ++--
.../engine/control_test.go | 4 ++--
.../engine/engine_test.go | 12 +++++-----
.../engine/evacuate_test.go | 4 ++--
pkg/local_object_storage/engine/list_test.go | 2 +-
.../engine/remove_copies.go | 2 +-
.../engine/remove_copies_test.go | 4 ++--
.../engine/shards_test.go | 2 +-
pkg/local_object_storage/engine/tree_test.go | 6 ++---
.../internal/testutil/generators_test.go | 6 ++---
.../metabase/containers_test.go | 8 +++----
.../metabase/counter_test.go | 8 +++----
.../metabase/delete_test.go | 2 +-
pkg/local_object_storage/metabase/get_test.go | 4 ++--
.../metabase/list_test.go | 12 +++++-----
.../metabase/lock_test.go | 4 ++--
pkg/local_object_storage/metabase/put_test.go | 2 +-
.../metabase/reset_test.go | 2 +-
.../metabase/select_test.go | 6 ++---
pkg/local_object_storage/metabase/upgrade.go | 2 +-
.../metabase/upgrade_test.go | 10 ++++----
pkg/local_object_storage/pilorama/boltdb.go | 2 +-
.../pilorama/forest_test.go | 24 +++++++++----------
pkg/local_object_storage/shard/list_test.go | 4 ++--
.../shard/metrics_test.go | 10 ++++----
pkg/local_object_storage/shard/refill_test.go | 4 ++--
.../writecache/benchmark/writecache_test.go | 2 +-
pkg/local_object_storage/writecache/flush.go | 2 +-
pkg/morph/event/notary_preparator_test.go | 2 +-
pkg/morph/timer/block_test.go | 2 +-
pkg/network/tls_test.go | 2 +-
pkg/services/control/server/evacuate.go | 2 +-
pkg/services/object/acl/v2/util_test.go | 2 +-
pkg/services/object/get/get_test.go | 8 +++----
pkg/services/object/get/getrangeec_test.go | 2 +-
pkg/services/object/put/ec.go | 2 +-
pkg/services/object/search/search_test.go | 4 ++--
.../object_manager/placement/cache_test.go | 4 ++--
.../object_manager/placement/traverser.go | 4 ++--
.../placement/traverser_test.go | 6 ++---
.../storage/persistent/executor_test.go | 2 +-
pkg/services/tree/getsubtree_test.go | 2 +-
pkg/services/tree/replicator.go | 2 +-
pkg/util/sync/key_locker_test.go | 2 +-
66 files changed, 135 insertions(+), 135 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
index 5a0d29550..be2134b77 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
+++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
@@ -68,7 +68,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
if irSize != 0 {
bw.Reset()
- for i := 0; i < irSize; i++ {
+ for i := range irSize {
emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly,
helper.GetAlphabetNNSDomain(i),
int64(nns.TXT))
@@ -79,7 +79,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't fetch info from NNS: %w", err)
}
- for i := 0; i < irSize; i++ {
+ for i := range irSize {
info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)}
if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil {
info.hash = h
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
index 44d1b4ecf..375fa84d7 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
@@ -224,7 +224,7 @@ func (l *LocalClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, e
paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}}
} else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok {
paramz = make([]manifest.Parameter, nSigs)
- for j := 0; j < nSigs; j++ {
+ for j := range nSigs {
paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType}
}
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go
index 2d9281c24..8c6b90539 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go
@@ -44,7 +44,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
var wallets []*wallet.Wallet
var letter string
- for i := 0; i < constants.MaxAlphabetNodes; i++ {
+ for i := range constants.MaxAlphabetNodes {
letter = innerring.GlagoliticLetter(i).String()
p := filepath.Join(walletDir, letter+".json")
var w *wallet.Wallet
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
index 6c52aa2ab..74f5d3e88 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
@@ -113,7 +113,7 @@ func generateTestData(dir string, size int) error {
}
var pubs []string
- for i := 0; i < size; i++ {
+ for i := range size {
p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json")
w, err := wallet.NewWalletFromFile(p)
if err != nil {
@@ -148,7 +148,7 @@ func generateTestData(dir string, size int) error {
}
func setTestCredentials(v *viper.Viper, size int) {
- for i := 0; i < size; i++ {
+ for i := range size {
v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10))
}
v.Set("credentials.contract", constants.TestContractPassword)
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
index a0fa22410..57bcf5620 100644
--- a/cmd/frostfs-cli/internal/client/client.go
+++ b/cmd/frostfs-cli/internal/client/client.go
@@ -692,7 +692,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
for {
n, ok = rdr.Read(buf)
- for i := 0; i < n; i++ {
+ for i := range n {
list = append(list, buf[i])
}
if !ok {
diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go
index c6f576908..f37b169ce 100644
--- a/cmd/frostfs-cli/modules/container/create.go
+++ b/cmd/frostfs-cli/modules/container/create.go
@@ -139,7 +139,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
},
}
- for i := 0; i < awaitTimeout; i++ {
+ for range awaitTimeout {
time.Sleep(1 * time.Second)
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
diff --git a/cmd/frostfs-cli/modules/container/delete.go b/cmd/frostfs-cli/modules/container/delete.go
index e5425bf25..c20188884 100644
--- a/cmd/frostfs-cli/modules/container/delete.go
+++ b/cmd/frostfs-cli/modules/container/delete.go
@@ -110,7 +110,7 @@ Only owner of the container has a permission to remove container.`,
},
}
- for i := 0; i < awaitTimeout; i++ {
+ for range awaitTimeout {
time.Sleep(1 * time.Second)
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go
index 896f6f17f..0eac4e6d2 100644
--- a/cmd/frostfs-cli/modules/object/nodes.go
+++ b/cmd/frostfs-cli/modules/object/nodes.go
@@ -195,7 +195,7 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member
prmHead.SetRawFlag(true) // to get an error instead of whole object
eg, egCtx := errgroup.WithContext(cmd.Context())
- for idx := 0; idx < len(members); idx++ {
+ for idx := range len(members) {
partObjID := members[idx]
eg.Go(func() error {
diff --git a/cmd/frostfs-cli/modules/util/acl.go b/cmd/frostfs-cli/modules/util/acl.go
index 4c2e324b3..145dcc756 100644
--- a/cmd/frostfs-cli/modules/util/acl.go
+++ b/cmd/frostfs-cli/modules/util/acl.go
@@ -33,7 +33,7 @@ func PrettyPrintTableBACL(cmd *cobra.Command, bacl *acl.Basic) {
fmt.Fprintln(w, strings.Join(bits, "\t"))
// Footer
footer := []string{"X F"}
- for i := 0; i < 7; i++ {
+ for range 7 {
footer = append(footer, "U S O B")
}
fmt.Fprintln(w, strings.Join(footer, "\t"))
diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go
index 97aca274a..4d063245b 100644
--- a/cmd/frostfs-node/config/node/config.go
+++ b/cmd/frostfs-node/config/node/config.go
@@ -121,7 +121,7 @@ func BootstrapAddresses(c *config.Config) (addr network.AddressGroup) {
func Attributes(c *config.Config) (attrs []string) {
const maxAttributes = 100
- for i := 0; i < maxAttributes; i++ {
+ for i := range maxAttributes {
attr := config.StringSafe(c.Sub(subsection), attributePrefix+"_"+strconv.Itoa(i))
if attr == "" {
return
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 1b148095b..7178cd97d 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -157,7 +157,7 @@ var (
)
func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error {
- for i := 0; i < notaryDepositRetriesAmount; i++ {
+ for range notaryDepositRetriesAmount {
c.log.Debug(logs.ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted)
select {
case <-ctx.Done():
diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go
index 5bc2cc988..c8a819b5b 100644
--- a/pkg/innerring/indexer_test.go
+++ b/pkg/innerring/indexer_test.go
@@ -237,7 +237,7 @@ func BenchmarkKeyPosition(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for range b.N {
if keyPosition(key, list) != 5 {
b.FailNow()
}
diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go
index c601f5587..e6f2b1de4 100644
--- a/pkg/innerring/notary.go
+++ b/pkg/innerring/notary.go
@@ -90,7 +90,7 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
}
func awaitNotaryDepositInClient(ctx context.Context, cli *client.Client, txHash util.Uint256) error {
- for i := 0; i < notaryDepositTimeout; i++ {
+ for range notaryDepositTimeout {
select {
case <-ctx.Done():
return ctx.Err()
diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go
index 346901949..dfda37472 100644
--- a/pkg/innerring/processors/alphabet/handlers_test.go
+++ b/pkg/innerring/processors/alphabet/handlers_test.go
@@ -21,7 +21,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
var parsedWallets []util.Uint160 = []util.Uint160{{20}, {25}}
alphabetContracts := innerring.NewAlphabetContracts()
- for i := 0; i <= index; i++ {
+ for i := range index + 1 {
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
}
@@ -98,7 +98,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
var parsedWallets []util.Uint160 = []util.Uint160{}
alphabetContracts := innerring.NewAlphabetContracts()
- for i := 0; i <= index; i++ {
+ for i := range index + 1 {
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
}
@@ -170,7 +170,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
var parsedWallets []util.Uint160 = []util.Uint160{}
alphabetContracts := innerring.NewAlphabetContracts()
- for i := 0; i <= index; i++ {
+ for i := range index + 1 {
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
}
diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go
index 2a505f8d1..b73e24318 100644
--- a/pkg/innerring/processors/governance/handlers_test.go
+++ b/pkg/innerring/processors/governance/handlers_test.go
@@ -190,7 +190,7 @@ func generateTestKeys(t *testing.T) testKeys {
for {
var result testKeys
- for i := 0; i < 4; i++ {
+ for range 4 {
pk, err := keys.NewPrivateKey()
require.NoError(t, err, "failed to create private key")
result.sidechainKeys = append(result.sidechainKeys, pk.PublicKey())
diff --git a/pkg/innerring/processors/governance/list_test.go b/pkg/innerring/processors/governance/list_test.go
index d099ec837..4ecebf05b 100644
--- a/pkg/innerring/processors/governance/list_test.go
+++ b/pkg/innerring/processors/governance/list_test.go
@@ -49,7 +49,7 @@ func TestNewAlphabetList(t *testing.T) {
}
ln := len(rounds)
- for i := 0; i < ln; i++ {
+ for i := range ln {
list, err = newAlphabetList(list, exp)
require.NoError(t, err)
require.True(t, equalPublicKeyLists(list, rounds[i]))
@@ -131,7 +131,7 @@ func TestUpdateInnerRing(t *testing.T) {
func generateKeys(n int) (keys.PublicKeys, error) {
pubKeys := make(keys.PublicKeys, 0, n)
- for i := 0; i < n; i++ {
+ for range n {
privKey, err := keys.NewPrivateKey()
if err != nil {
return nil, err
diff --git a/pkg/local_object_storage/blobovnicza/sizes_test.go b/pkg/local_object_storage/blobovnicza/sizes_test.go
index 01093b8d7..d582fc5e4 100644
--- a/pkg/local_object_storage/blobovnicza/sizes_test.go
+++ b/pkg/local_object_storage/blobovnicza/sizes_test.go
@@ -42,7 +42,7 @@ func TestSizes(t *testing.T) {
func BenchmarkUpperBound(b *testing.B) {
for _, size := range []uint64{1, 1023, 65 * 1024, 40 * 1024 * 1024} {
b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) {
- for i := 0; i < b.N; i++ {
+ for range b.N {
_ = upperPowerOfTwo(size)
}
})
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
index 5bed86142..cc8a52d03 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
@@ -34,7 +34,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
var cnt atomic.Int64
var wg sync.WaitGroup
- for i := 0; i < 1000; i++ {
+ for range 1000 {
wg.Add(1)
go func() {
defer wg.Done()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index 7a1de4c13..4a51fd86a 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -127,7 +127,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
eg, egCtx := errgroup.WithContext(context.Background())
storageIDs := make(map[oid.Address][]byte)
storageIDsGuard := &sync.Mutex{}
- for i := 0; i < 100; i++ {
+ for range 100 {
eg.Go(func() error {
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go
index c7d80dc84..bed5e0eb9 100644
--- a/pkg/local_object_storage/blobstor/blobstor_test.go
+++ b/pkg/local_object_storage/blobstor/blobstor_test.go
@@ -60,7 +60,7 @@ func TestCompression(t *testing.T) {
bigObj := make([]*objectSDK.Object, objCount)
smallObj := make([]*objectSDK.Object, objCount)
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
bigObj[i] = testObject(smallSizeLimit * 2)
smallObj[i] = testObject(smallSizeLimit / 2)
}
@@ -219,7 +219,7 @@ func TestConcurrentPut(t *testing.T) {
bigObj := testObject(smallSizeLimit * 2)
var wg sync.WaitGroup
- for i := 0; i < concurrentPutCount; i++ {
+ for range concurrentPutCount {
wg.Add(1)
go func() {
testPut(t, blobStor, bigObj)
@@ -235,7 +235,7 @@ func TestConcurrentPut(t *testing.T) {
bigObj := testObject(smallSizeLimit * 2)
var wg sync.WaitGroup
- for i := 0; i < concurrentPutCount+1; i++ {
+ for range concurrentPutCount + 1 {
wg.Add(1)
go func() {
testPutFileExistsError(t, blobStor, bigObj)
@@ -251,7 +251,7 @@ func TestConcurrentPut(t *testing.T) {
smallObj := testObject(smallSizeLimit / 2)
var wg sync.WaitGroup
- for i := 0; i < concurrentPutCount; i++ {
+ for range concurrentPutCount {
wg.Add(1)
go func() {
testPut(t, blobStor, smallObj)
@@ -302,7 +302,7 @@ func TestConcurrentDelete(t *testing.T) {
testPut(t, blobStor, bigObj)
var wg sync.WaitGroup
- for i := 0; i < 2; i++ {
+ for range 2 {
wg.Add(1)
go func() {
testDelete(t, blobStor, bigObj)
@@ -319,7 +319,7 @@ func TestConcurrentDelete(t *testing.T) {
testPut(t, blobStor, smallObj)
var wg sync.WaitGroup
- for i := 0; i < 2; i++ {
+ for range 2 {
wg.Add(1)
go func() {
testDelete(t, blobStor, smallObj)
diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go
index 986912985..9f70f8ec2 100644
--- a/pkg/local_object_storage/blobstor/compression/bench_test.go
+++ b/pkg/local_object_storage/blobstor/compression/bench_test.go
@@ -36,7 +36,7 @@ func BenchmarkCompression(b *testing.B) {
func benchWith(b *testing.B, c Config, data []byte) {
b.ResetTimer()
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for range b.N {
_ = c.Compress(data)
}
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
index d633cbac3..5786dfd3b 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
@@ -28,7 +28,7 @@ func Benchmark_addressFromString(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
- for i := 0; i < b.N; i++ {
+ for range b.N {
_, err := addressFromString(s)
if err != nil {
b.Fatalf("benchmark error: %v", err)
@@ -73,7 +73,7 @@ func TestObjectCounter(t *testing.T) {
eg, egCtx := errgroup.WithContext(context.Background())
eg.Go(func() error {
- for j := 0; j < 1_000; j++ {
+ for range 1_000 {
_, err := fst.Put(egCtx, putPrm)
if err != nil {
return err
@@ -84,7 +84,7 @@ func TestObjectCounter(t *testing.T) {
eg.Go(func() error {
var le logicerr.Logical
- for j := 0; j < 1_000; j++ {
+ for range 1_000 {
_, err := fst.Delete(egCtx, delPrm)
if err != nil && !errors.As(err, &le) {
return err
diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go
index 501c95a1d..1ac769e36 100644
--- a/pkg/local_object_storage/blobstor/perf_test.go
+++ b/pkg/local_object_storage/blobstor/perf_test.go
@@ -110,7 +110,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
// Fill database
var errG errgroup.Group
- for i := 0; i < tt.size; i++ {
+ for range tt.size {
obj := objGen.Next()
addr := testutil.AddressFromObject(b, obj)
errG.Go(func() error {
@@ -203,7 +203,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
defer func() { require.NoError(b, st.Close()) }()
// Fill database
- for i := 0; i < tt.size; i++ {
+ for range tt.size {
obj := objGen.Next()
addr := testutil.AddressFromObject(b, obj)
raw, err := obj.Marshal()
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index f0809883c..2de92ae84 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -208,7 +208,7 @@ func TestPersistentShardID(t *testing.T) {
require.NoError(t, te.ng.Close(context.Background()))
newTe := newEngineWithErrorThreshold(t, dir, 1)
- for i := 0; i < len(newTe.shards); i++ {
+ for i := range len(newTe.shards) {
require.Equal(t, te.shards[i].id, newTe.shards[i].id)
}
require.NoError(t, newTe.ng.Close(context.Background()))
@@ -269,7 +269,7 @@ func TestReload(t *testing.T) {
e, currShards := engineWithShards(t, removePath, shardNum)
var rcfg ReConfiguration
- for i := 0; i < len(currShards)-1; i++ { // without one of the shards
+ for i := range len(currShards) - 1 { // without one of the shards
rcfg.AddShard(currShards[i], nil)
}
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index 49976abbb..525e17f34 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -44,7 +44,7 @@ func BenchmarkExists(b *testing.B) {
func benchmarkExists(b *testing.B, shardNum int) {
shards := make([]*shard.Shard, shardNum)
- for i := 0; i < shardNum; i++ {
+ for i := range shardNum {
shards[i] = testNewShard(b)
}
@@ -52,7 +52,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
defer func() { require.NoError(b, e.Close(context.Background())) }()
addr := oidtest.Address()
- for i := 0; i < 100; i++ {
+ for range 100 {
obj := testutil.GenerateObjectWithCID(cidtest.ID())
err := Put(context.Background(), e, obj)
if err != nil {
@@ -62,7 +62,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
b.ReportAllocs()
b.ResetTimer()
- for i := 0; i < b.N; i++ {
+ for range b.N {
var shPrm shard.ExistsPrm
shPrm.Address = addr
shPrm.ParentAddress = oid.Address{}
@@ -109,7 +109,7 @@ func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard
func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
shards := make([]*shard.Shard, 0, num)
- for i := 0; i < num; i++ {
+ for range num {
shards = append(shards, testNewShard(t))
}
@@ -117,7 +117,7 @@ func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrap
}
func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
- for i := 0; i < num; i++ {
+ for i := range num {
opts := shardOpts(i)
id, err := te.engine.AddShard(context.Background(), opts...)
require.NoError(t, err)
@@ -127,7 +127,7 @@ func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts f
}
func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
- for i := 0; i < num; i++ {
+ for i := range num {
defaultOpts := testDefaultShardOptions(t)
opts := append(defaultOpts, shardOpts(i)...)
id, err := te.engine.AddShard(context.Background(), opts...)
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index 55268b549..8d25dad4a 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -61,7 +61,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
}
for _, sh := range ids {
- for i := 0; i < objPerShard; i++ {
+ for range objPerShard {
contID := cidtest.ID()
obj := testutil.GenerateObjectWithCID(contID)
objects = append(objects, obj)
@@ -554,7 +554,7 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
expectedTreeOps := make(map[string][]*pilorama.Move)
- for i := 0; i < len(e.shards); i++ {
+ for i := range len(e.shards) {
sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[i].String()])
require.NoError(t, err, "list source trees failed")
require.Len(t, sourceTrees, 3)
diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go
index dd8a2e8a0..11a6c7841 100644
--- a/pkg/local_object_storage/engine/list_test.go
+++ b/pkg/local_object_storage/engine/list_test.go
@@ -79,7 +79,7 @@ func TestListWithCursor(t *testing.T) {
expected := make([]object.Info, 0, tt.objectNum)
got := make([]object.Info, 0, tt.objectNum)
- for i := 0; i < tt.objectNum; i++ {
+ for range tt.objectNum {
containerID := cidtest.ID()
obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'})
diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go
index b99cf4f44..5e1ced56a 100644
--- a/pkg/local_object_storage/engine/remove_copies.go
+++ b/pkg/local_object_storage/engine/remove_copies.go
@@ -87,7 +87,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
}
})
- for i := 0; i < prm.Concurrency; i++ {
+ for range prm.Concurrency {
errG.Go(func() error {
return e.removeObjects(ctx, ch)
})
diff --git a/pkg/local_object_storage/engine/remove_copies_test.go b/pkg/local_object_storage/engine/remove_copies_test.go
index 99963576c..6d2291c74 100644
--- a/pkg/local_object_storage/engine/remove_copies_test.go
+++ b/pkg/local_object_storage/engine/remove_copies_test.go
@@ -96,7 +96,7 @@ loop:
require.FailNow(t, "unexpected object was removed", removed[i].addr)
}
- for i := 0; i < copyCount; i++ {
+ for i := range copyCount {
if i%3 == 0 {
require.True(t, removedMask[i], "object %d was expected to be removed", i)
} else {
@@ -207,7 +207,7 @@ func TestRebalanceExitByContext(t *testing.T) {
}()
const removeCount = 3
- for i := 0; i < removeCount-1; i++ {
+ for range removeCount - 1 {
<-deleteCh
signal <- struct{}{}
}
diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go
index f4c7a4309..3347d58f1 100644
--- a/pkg/local_object_storage/engine/shards_test.go
+++ b/pkg/local_object_storage/engine/shards_test.go
@@ -72,7 +72,7 @@ func TestSortShardsByWeight(t *testing.T) {
var shards1 []hashedShard
var weights1 []float64
var shards2 []hashedShard
- for i := 0; i < numOfShards; i++ {
+ for i := range numOfShards {
shards1 = append(shards1, hashedShard{
hash: uint64(i),
})
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index 2739058e9..6f694f082 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -34,7 +34,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
d := pilorama.CIDDescriptor{CID: cid, Position: 0, Size: 1}
treeID := "someTree"
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
obj := testutil.GenerateObjectWithCID(cid)
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
err := Put(context.Background(), te.ng, obj)
@@ -56,7 +56,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual)
prm.WithFilters(fs)
- for i := 0; i < b.N; i++ {
+ for range b.N {
res, err := te.ng.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
@@ -67,7 +67,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
}
})
b.Run("TreeGetByPath", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
+ for range b.N {
nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true)
if err != nil {
b.Fatal(err)
diff --git a/pkg/local_object_storage/internal/testutil/generators_test.go b/pkg/local_object_storage/internal/testutil/generators_test.go
index f7be6014d..cc6f726a4 100644
--- a/pkg/local_object_storage/internal/testutil/generators_test.go
+++ b/pkg/local_object_storage/internal/testutil/generators_test.go
@@ -13,7 +13,7 @@ func TestOverwriteObjGenerator(t *testing.T) {
ObjSize: 10,
MaxObjects: 4,
}
- for i := 0; i < 40; i++ {
+ for range 40 {
obj := gen.Next()
id, isSet := obj.ID()
i := binary.LittleEndian.Uint64(id[:])
@@ -26,7 +26,7 @@ func TestOverwriteObjGenerator(t *testing.T) {
func TestRandObjGenerator(t *testing.T) {
gen := &RandObjGenerator{ObjSize: 10}
- for i := 0; i < 10; i++ {
+ for range 10 {
obj := gen.Next()
require.Equal(t, gen.ObjSize, uint64(len(obj.Payload())))
@@ -50,7 +50,7 @@ func TestSeqObjGenerator(t *testing.T) {
func TestRandAddrGenerator(t *testing.T) {
gen := RandAddrGenerator(5)
- for i := 0; i < 50; i++ {
+ for range 50 {
addr := gen.Next()
id := addr.Object()
k := binary.LittleEndian.Uint64(id[:])
diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go
index 5d6788d7e..8b1874458 100644
--- a/pkg/local_object_storage/metabase/containers_test.go
+++ b/pkg/local_object_storage/metabase/containers_test.go
@@ -24,7 +24,7 @@ func TestDB_Containers(t *testing.T) {
cids := make(map[string]int, N)
- for i := 0; i < N; i++ {
+ for range N {
obj := testutil.GenerateObject()
cnr, _ := obj.ContainerID()
@@ -95,7 +95,7 @@ func TestDB_ContainersCount(t *testing.T) {
expected := make([]cid.ID, 0, R+T+SG+L)
for _, upload := range uploadObjects {
- for i := 0; i < upload.amount; i++ {
+ for range upload.amount {
obj := testutil.GenerateObject()
obj.SetType(upload.typ)
@@ -126,11 +126,11 @@ func TestDB_ContainerSize(t *testing.T) {
cids := make(map[cid.ID]int, C)
objs := make(map[cid.ID][]*objectSDK.Object, C*N)
- for i := 0; i < C; i++ {
+ for range C {
cnr := cidtest.ID()
cids[cnr] = 0
- for j := 0; j < N; j++ {
+ for range N {
size := rand.Intn(1024)
parent := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go
index 1797fc0aa..d1f808a63 100644
--- a/pkg/local_object_storage/metabase/counter_test.go
+++ b/pkg/local_object_storage/metabase/counter_test.go
@@ -39,14 +39,14 @@ func TestCounters(t *testing.T) {
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
oo := make([]*objectSDK.Object, 0, objCount)
- for i := 0; i < objCount; i++ {
+ for range objCount {
oo = append(oo, testutil.GenerateObject())
}
var prm meta.PutPrm
exp := make(map[cid.ID]meta.ObjectCounters)
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
prm.SetObject(oo[i])
cnrID, _ := oo[i].ContainerID()
c := meta.ObjectCounters{}
@@ -187,7 +187,7 @@ func TestCounters(t *testing.T) {
// put objects and check that parent info
// does not affect the counter
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
o := testutil.GenerateObject()
if i < objCount/2 { // half of the objs will have the parent
o.SetParent(parObj)
@@ -535,7 +535,7 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*objectSDK
parent := testutil.GenerateObject()
oo := make([]*objectSDK.Object, 0, count)
- for i := 0; i < count; i++ {
+ for i := range count {
o := testutil.GenerateObject()
if withParent {
o.SetParent(parent)
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index 2053874d0..cb85157e7 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -131,7 +131,7 @@ func TestDelete(t *testing.T) {
defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
- for i := 0; i < 10; i++ {
+ for range 10 {
obj := testutil.GenerateObjectWithCID(cnr)
var prm meta.PutPrm
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index 247ddf9cd..7654d2cd8 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -223,7 +223,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
defer func() { require.NoError(b, db.Close()) }()
addrs := make([]oid.Address, 0, numOfObj)
- for i := 0; i < numOfObj; i++ {
+ for range numOfObj {
raw := testutil.GenerateObject()
addrs = append(addrs, object.AddressOf(raw))
@@ -261,7 +261,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
b.Run("serial", func(b *testing.B) {
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for i := range b.N {
var getPrm meta.GetPrm
getPrm.SetAddress(addrs[i%len(addrs)])
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index a92e2eff4..6207497b1 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -35,7 +35,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
defer func() { require.NoError(b, db.Close()) }()
obj := testutil.GenerateObject()
- for i := 0; i < 100_000; i++ { // should be a multiple of all batch sizes
+ for i := range 100_000 { // should be a multiple of all batch sizes
obj.SetID(oidtest.ID())
if i%9 == 0 { // let's have 9 objects per container
obj.SetContainerID(cidtest.ID())
@@ -51,7 +51,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
b.ResetTimer()
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for range b.N {
res, err := db.ListWithCursor(context.Background(), prm)
if err != nil {
if err != meta.ErrEndOfListing {
@@ -80,7 +80,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
expected := make([]object.Info, 0, total)
// fill metabase with objects
- for i := 0; i < containers; i++ {
+ for range containers {
containerID := cidtest.ID()
// add one regular object
@@ -140,7 +140,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
expectedIterations--
}
- for i := 0; i < expectedIterations; i++ {
+ for range expectedIterations {
res, cursor, err = metaListWithCursor(db, uint32(countPerReq), cursor)
require.NoError(t, err, "count:%d", countPerReq)
got = append(got, res...)
@@ -169,7 +169,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
expected := make(map[string]int, total)
// fill metabase with objects
- for i := 0; i < total; i++ {
+ for range total {
obj := testutil.GenerateObject()
err := putBig(db, obj)
require.NoError(t, err)
@@ -186,7 +186,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
}
// add new objects
- for i := 0; i < total; i++ {
+ for range total {
obj := testutil.GenerateObject()
err = putBig(db, obj)
require.NoError(t, err)
diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go
index 2d7bfc1cc..62a109b02 100644
--- a/pkg/local_object_storage/metabase/lock_test.go
+++ b/pkg/local_object_storage/metabase/lock_test.go
@@ -155,7 +155,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetGCMark()
- for i := 0; i < objsNum; i++ {
+ for i := range objsNum {
inhumePrm.SetAddresses(objectcore.AddressOf(objs[i]))
res, err = db.Inhume(context.Background(), inhumePrm)
@@ -255,7 +255,7 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*objectSDK
lockedObjs := make([]*objectSDK.Object, 0, numOfLockedObjs)
lockedObjIDs := make([]oid.ID, 0, numOfLockedObjs)
- for i := 0; i < numOfLockedObjs; i++ {
+ for range numOfLockedObjs {
obj := testutil.GenerateObjectWithCID(cnr)
err := putBig(db, obj)
require.NoError(t, err)
diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go
index 84e4029f2..914f5ef06 100644
--- a/pkg/local_object_storage/metabase/put_test.go
+++ b/pkg/local_object_storage/metabase/put_test.go
@@ -74,7 +74,7 @@ func BenchmarkPut(b *testing.B) {
objs := prepareObjects(b.N)
b.ResetTimer()
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for range b.N {
if err := metaPut(db, objs[index.Add(1)], nil); err != nil {
b.Fatal(err)
}
diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go
index 66f5eefc6..993079dce 100644
--- a/pkg/local_object_storage/metabase/reset_test.go
+++ b/pkg/local_object_storage/metabase/reset_test.go
@@ -34,7 +34,7 @@ func TestResetDropsContainerBuckets(t *testing.T) {
defer func() { require.NoError(t, db.Close()) }()
- for idx := 0; idx < 100; idx++ {
+ for idx := range 100 {
var putPrm PutPrm
putPrm.SetObject(testutil.GenerateObject())
putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx)))
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 4fbc5910e..0fab3a108 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -920,7 +920,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) {
ec, err := erasurecode.NewConstructor(dataCount, parityCount)
require.NoError(t, err)
- for i := 0; i < partCount; i++ {
+ for i := range partCount {
cs, err := ec.Split(tt.objects[i], &pk.PrivateKey)
require.NoError(t, err)
@@ -1070,7 +1070,7 @@ func BenchmarkSelect(b *testing.B) {
cid := cidtest.ID()
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
var attr objectSDK.Attribute
attr.SetKey("myHeader")
attr.SetValue(strconv.Itoa(i))
@@ -1129,7 +1129,7 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
prm.SetContainerID(cid)
prm.SetFilters(fs)
- for i := 0; i < b.N; i++ {
+ for range b.N {
res, err := db.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index a4c7707b4..e9abd746c 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -143,7 +143,7 @@ func createExpirationEpochBuckets(ctx context.Context, db *bbolt.DB, log func(a
return selectObjectsWithExpirationEpoch(ctx, db, objects)
})
var count atomic.Uint64
- for i := 0; i < upgradeWorkersCount; i++ {
+ for range upgradeWorkersCount {
eg.Go(func() error {
for {
select {
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
index dc3d7d07d..3797de0a4 100644
--- a/pkg/local_object_storage/metabase/upgrade_test.go
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -91,7 +91,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
eg, ctx := errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects
- for i := 0; i < simpleObjectsCount; i++ {
+ for i := range simpleObjectsCount {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
@@ -110,7 +110,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// complex objects
- for i := 0; i < complexObjectsCount; i++ {
+ for i := range complexObjectsCount {
i := i
eg.Go(func() error {
parent := testutil.GenerateObjectWithCID(containers[i%len(containers)])
@@ -134,7 +134,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects deleted by gc marks
- for i := 0; i < deletedByGCMarksCount; i++ {
+ for i := range deletedByGCMarksCount {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
@@ -156,7 +156,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(10000)
// simple objects deleted by tombstones
- for i := 0; i < deletedByTombstoneCount; i++ {
+ for i := range deletedByTombstoneCount {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
@@ -186,7 +186,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects locked by locks
- for i := 0; i < lockedCount; i++ {
+ for i := range lockedCount {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index 29941be83..e2d69cafa 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -705,7 +705,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M
key, value = c.Prev()
}
- for i := 0; i < len(ms); i++ {
+ for i := range len(ms) {
// Loop invariant: key represents the next stored timestamp after ms[i].Time.
// 2. Insert the operation.
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index 41d7a567c..854fe0aad 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -194,7 +194,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
const total = 100_000
d := CIDDescriptor{cnr, 0, 1}
- for i := 0; i < total; i++ {
+ for i := range total {
u, err := uuid.NewRandom()
if err != nil {
b.FailNow()
@@ -216,7 +216,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
}
b.Run(providers[i].name+",root", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
+ for range b.N {
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{RootID}, nil, 100)
if err != nil || len(res) != 100 {
b.Fatalf("err %v, count %d", err, len(res))
@@ -224,7 +224,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
}
})
b.Run(providers[i].name+",leaf", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
+ for range b.N {
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{1}, nil, 100)
if err != nil || len(res) != 0 {
b.FailNow()
@@ -804,7 +804,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
defer func() { require.NoError(t, s.Close()) }()
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
- for i := 0; i < batchSize; i++ {
+ for range batchSize {
errG.Go(func() error {
return s.TreeApply(ctx, cid, treeID, &logs[2], false)
})
@@ -1043,7 +1043,7 @@ func TestForest_ParallelApply(t *testing.T) {
// The operations are guaranteed to be applied and returned sorted by `Time`.
func prepareRandomTree(nodeCount, opCount int) []Move {
ops := make([]Move, nodeCount+opCount)
- for i := 0; i < nodeCount; i++ {
+ for i := range nodeCount {
ops[i] = Move{
Parent: 0,
Meta: Meta{
@@ -1121,14 +1121,14 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
- for i := 0; i < iterCount; i++ {
+ for range iterCount {
// Shuffle random operations, leave initialization in place.
r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
actual := constructor(t, WithMaxBatchSize(batchSize), WithNoSync(true))
wg := new(sync.WaitGroup)
ch := make(chan *Move)
- for i := 0; i < batchSize; i++ {
+ for range batchSize {
wg.Add(1)
go func() {
defer wg.Done()
@@ -1170,7 +1170,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
}
const iterCount = 200
- for i := 0; i < iterCount; i++ {
+ for range iterCount {
// Shuffle random operations, leave initialization in place.
r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
@@ -1247,7 +1247,7 @@ func BenchmarkApplyReorderLast(b *testing.B) {
Child: uint64(r.Intn(benchNodeCount)),
}
if i != 0 && i%blockSize == 0 {
- for j := 0; j < blockSize/2; j++ {
+ for j := range blockSize / 2 {
ops[i-j], ops[i+j-blockSize] = ops[i+j-blockSize], ops[i-j]
}
}
@@ -1265,7 +1265,7 @@ func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) {
cid := cidtest.ID()
treeID := "version"
ch := make(chan int, b.N)
- for i := 0; i < b.N; i++ {
+ for i := range b.N {
ch <- i
}
@@ -1311,7 +1311,7 @@ func testTreeGetByPath(t *testing.T, s ForestStorage) {
if mf, ok := s.(*memoryForest); ok {
single := mf.treeMap[cid.String()+"/"+treeID]
t.Run("test meta", func(t *testing.T) {
- for i := 0; i < 6; i++ {
+ for i := range 6 {
require.Equal(t, uint64(i), single.infoMap[Node(i+1)].Meta.Time)
}
})
@@ -1492,7 +1492,7 @@ func testTreeListTreesCount(t *testing.T, constructor func(t testing.TB, _ ...Op
var expected []ContainerIDTreeID
treeIDs := []string{"version", "system", "s", "avada kedavra"}
- for i := 0; i < count; i++ {
+ for i := range count {
cid := cidtest.ID()
treeID := treeIDs[i%len(treeIDs)]
expected = append(expected, ContainerIDTreeID{
diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go
index 8a49a36fd..3414dc76a 100644
--- a/pkg/local_object_storage/shard/list_test.go
+++ b/pkg/local_object_storage/shard/list_test.go
@@ -39,11 +39,11 @@ func testShardList(t *testing.T, sh *Shard) {
var errG errgroup.Group
errG.SetLimit(C * N)
- for i := 0; i < C; i++ {
+ for range C {
errG.Go(func() error {
cnr := cidtest.ID()
- for j := 0; j < N; j++ {
+ for range N {
errG.Go(func() error {
obj := testutil.GenerateObjectWithCID(cnr)
testutil.AddPayload(obj, 1<<2)
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 38d465f31..1ef849c02 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -206,7 +206,7 @@ func TestCounters(t *testing.T) {
const objNumber = 10
oo := make([]*objectSDK.Object, objNumber)
- for i := 0; i < objNumber; i++ {
+ for i := range objNumber {
oo[i] = testutil.GenerateObject()
}
@@ -248,7 +248,7 @@ func TestCounters(t *testing.T) {
var prm PutPrm
- for i := 0; i < objNumber; i++ {
+ for i := range objNumber {
prm.SetObject(oo[i])
_, err := sh.Put(context.Background(), prm)
@@ -269,7 +269,7 @@ func TestCounters(t *testing.T) {
var prm InhumePrm
inhumedNumber := objNumber / 4
- for i := 0; i < inhumedNumber; i++ {
+ for i := range inhumedNumber {
prm.MarkAsGarbage(objectcore.AddressOf(oo[i]))
_, err := sh.Inhume(context.Background(), prm)
@@ -317,7 +317,7 @@ func TestCounters(t *testing.T) {
_, err := sh.Inhume(context.Background(), prm)
require.NoError(t, err)
- for i := 0; i < inhumedNumber; i++ {
+ for i := range inhumedNumber {
cid, ok := oo[i].ContainerID()
require.True(t, ok)
expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize())
@@ -419,7 +419,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
func addrFromObjs(oo []*objectSDK.Object) []oid.Address {
aa := make([]oid.Address, len(oo))
- for i := 0; i < len(oo); i++ {
+ for i := range len(oo) {
aa[i] = objectcore.AddressOf(oo[i])
}
diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go
index 509ccaaa6..0025bb45a 100644
--- a/pkg/local_object_storage/shard/refill_test.go
+++ b/pkg/local_object_storage/shard/refill_test.go
@@ -38,7 +38,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
var putPrm PutPrm
- for i := 0; i < objectsCount/2; i++ {
+ for range objectsCount / 2 {
obj := testutil.GenerateObject()
testutil.AddAttribute(obj, "foo", "bar")
testutil.AddPayload(obj, 1<<5) // blobvnicza tree obj
@@ -49,7 +49,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
require.NoError(b, err)
}
- for i := 0; i < objectsCount/2; i++ {
+ for range objectsCount / 2 {
obj := testutil.GenerateObject()
testutil.AddAttribute(obj, "foo", "bar")
obj.SetID(oidtest.ID())
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
index 4f4398452..4da9a26d7 100644
--- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -54,7 +54,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
objGen := testutil.RandObjGenerator{ObjSize: size}
b.ResetTimer()
- for n := 0; n < b.N; n++ {
+ for range b.N {
obj := objGen.Next()
rawData, err := obj.Marshal()
require.NoError(b, err, "marshaling object")
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index e34f5a76b..930ac8431 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -41,7 +41,7 @@ func (c *cache) runFlushLoop(ctx context.Context) {
if c.disableBackgroundFlush {
return
}
- for i := 0; i < c.workersCount; i++ {
+ for range c.workersCount {
c.wg.Add(1)
go c.workerFlushSmall(ctx)
}
diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go
index 8da9d868a..4c269bcbd 100644
--- a/pkg/morph/event/notary_preparator_test.go
+++ b/pkg/morph/event/notary_preparator_test.go
@@ -439,7 +439,7 @@ func TestPrepare_CorrectNR(t *testing.T) {
)
for _, test := range tests {
- for i := 0; i < 1; i++ { // run tests against 3 and 4 witness NR
+ for i := range 1 { // run tests against 3 and 4 witness NR
for _, dummyMultisig := range []bool{true, false} { // run tests against empty and dummy multisig/Notary witness
additionalWitness := i == 0
nr := correctNR(script(test.hash, test.method, test.args...), dummyMultisig, additionalWitness)
diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go
index 93bb04de5..7929754c1 100644
--- a/pkg/morph/timer/block_test.go
+++ b/pkg/morph/timer/block_test.go
@@ -208,7 +208,7 @@ func TestBlockTimer_TickSameHeight(t *testing.T) {
require.NoError(t, bt.Reset())
check := func(t *testing.T, h uint32, base, delta int) {
- for i := 0; i < 2*int(blockDur); i++ {
+ for range 2 * int(blockDur) {
bt.Tick(h)
require.Equal(t, base, baseCounter)
require.Equal(t, delta, deltaCounter)
diff --git a/pkg/network/tls_test.go b/pkg/network/tls_test.go
index 6c352484b..14729f4c2 100644
--- a/pkg/network/tls_test.go
+++ b/pkg/network/tls_test.go
@@ -37,7 +37,7 @@ func BenchmarkAddressTLSEnabled(b *testing.B) {
b.ReportAllocs()
var enabled bool
- for i := 0; i < b.N; i++ {
+ for range b.N {
enabled = addr.IsTLSEnabled()
}
require.True(b, enabled)
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
index 0ba8be765..300cb9dc9 100644
--- a/pkg/services/control/server/evacuate.go
+++ b/pkg/services/control/server/evacuate.go
@@ -169,7 +169,7 @@ func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) {
nodes := placement.FlattenNodes(ns)
bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
- for i := 0; i < len(nodes); i++ {
+ for i := range len(nodes) {
if bytes.Equal(nodes[i].PublicKey(), bs) {
copy(nodes[i:], nodes[i+1:])
nodes = nodes[:len(nodes)-1]
diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go
index 394feef4e..435339683 100644
--- a/pkg/services/object/acl/v2/util_test.go
+++ b/pkg/services/object/acl/v2/util_test.go
@@ -33,7 +33,7 @@ func TestOriginalTokens(t *testing.T) {
var sTokenV2 session.Token
sToken.WriteToV2(&sTokenV2)
- for i := 0; i < 10; i++ {
+ for i := range 10 {
metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2)
res, err := originalSessionToken(metaHeaders)
require.NoError(t, err)
diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go
index 1fc6b7b20..6827018dc 100644
--- a/pkg/services/object/get/get_test.go
+++ b/pkg/services/object/get/get_test.go
@@ -470,7 +470,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
ns := make([]netmap.NodeInfo, dim[i])
as := make([]string, dim[i])
- for j := 0; j < dim[i]; j++ {
+ for j := range dim[i] {
a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
strconv.Itoa(i),
strconv.Itoa(60000+j),
@@ -508,7 +508,7 @@ func generateChain(ln int, cnr cid.ID) ([]*objectSDK.Object, []oid.ID, []byte) {
ids := make([]oid.ID, 0, ln)
payload := make([]byte, 0, ln*10)
- for i := 0; i < ln; i++ {
+ for i := range ln {
ids = append(ids, curID)
addr.SetObject(curID)
@@ -1750,7 +1750,7 @@ func TestGetRange(t *testing.T) {
},
})
- for from := 0; from < totalSize-1; from++ {
+ for from := range totalSize - 1 {
for to := from; to < totalSize; to++ {
t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
@@ -1811,7 +1811,7 @@ func TestGetRange(t *testing.T) {
},
})
- for from := 0; from < totalSize-1; from++ {
+ for from := range totalSize - 1 {
for to := from; to < totalSize; to++ {
t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go
index b8497d7d1..a6882d4a8 100644
--- a/pkg/services/object/get/getrangeec_test.go
+++ b/pkg/services/object/get/getrangeec_test.go
@@ -131,7 +131,7 @@ func TestGetRangeEC(t *testing.T) {
clients: clients,
})
- for from := 0; from < totalSize-1; from++ {
+ for from := range totalSize - 1 {
for to := from; to < totalSize; to++ {
t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
testGetRange(t, svc, addr, uint64(from), uint64(to), obj.Payload())
diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/put/ec.go
index 1fadf65fe..9980f6d61 100644
--- a/pkg/services/object/put/ec.go
+++ b/pkg/services/object/put/ec.go
@@ -276,7 +276,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
}
// try to save to any node not visited by current part
- for i := 0; i < len(nodes); i++ {
+ for i := range len(nodes) {
select {
case <-ctx.Done():
return ctx.Err()
diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go
index 679380402..44abcfe5b 100644
--- a/pkg/services/object/search/search_test.go
+++ b/pkg/services/object/search/search_test.go
@@ -151,7 +151,7 @@ func testSHA256() (cs [sha256.Size]byte) {
func generateIDs(num int) []oid.ID {
res := make([]oid.ID, num)
- for i := 0; i < num; i++ {
+ for i := range num {
res[i].SetSHA256(testSHA256())
}
@@ -232,7 +232,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
ns := make([]netmap.NodeInfo, dim[i])
as := make([]string, dim[i])
- for j := 0; j < dim[i]; j++ {
+ for j := range dim[i] {
a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
strconv.Itoa(i),
strconv.Itoa(60000+j),
diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go
index 07e9340dc..a890d5357 100644
--- a/pkg/services/object_manager/placement/cache_test.go
+++ b/pkg/services/object_manager/placement/cache_test.go
@@ -64,7 +64,7 @@ func TestContainerNodesCache(t *testing.T) {
nm2 := nm(1, nodes[1:2])
cnr := [size * 2]cid.ID{}
res := [size * 2][][]netmapSDK.NodeInfo{}
- for i := 0; i < size*2; i++ {
+ for i := range size * 2 {
cnr[i] = cidtest.ID()
var err error
@@ -77,7 +77,7 @@ func TestContainerNodesCache(t *testing.T) {
require.NoError(t, err)
require.Equal(t, res[i], r)
}
- for i := 0; i < size; i++ {
+ for i := range size {
r, err := c.ContainerNodes(nm2, cnr[i], pp)
require.NoError(t, err)
require.NotEqual(t, res[i], r)
diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index 9a5877c52..4e790628f 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -136,7 +136,7 @@ func defaultCopiesVector(policy netmap.PlacementPolicy) []int {
replNum := policy.NumberOfReplicas()
copyVector := make([]int, 0, replNum)
- for i := 0; i < replNum; i++ {
+ for i := range replNum {
copyVector = append(copyVector, int(policy.ReplicaDescriptor(i).NumberOfObjects()+policy.ReplicaDescriptor(i).GetECDataCount()+policy.ReplicaDescriptor(i).GetECParityCount()))
}
@@ -212,7 +212,7 @@ func (t *Traverser) Next() []Node {
nodes := make([]Node, count)
- for i := 0; i < count; i++ {
+ for i := range count {
err := nodes[i].addresses.FromIterator(network.NodeEndpointsIterator(t.vectors[0][i]))
if err != nil {
return nil
diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go
index f5731c81e..b3b57677d 100644
--- a/pkg/services/object_manager/placement/traverser_test.go
+++ b/pkg/services/object_manager/placement/traverser_test.go
@@ -48,7 +48,7 @@ func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) {
for i := range ss {
ns := make([]netmap.NodeInfo, 0, ss[i])
- for j := 0; j < ss[i]; j++ {
+ for range ss[i] {
ns = append(ns, testNode(num))
num++
}
@@ -125,7 +125,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
)
require.NoError(t, err)
- for i := 0; i < len(nodes[0]); i++ {
+ for range len(nodes[0]) {
require.NotNil(t, tr.Next())
}
@@ -164,7 +164,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
require.Empty(t, tr.Next())
require.False(t, tr.Success())
- for i := 0; i < replicas[curVector]; i++ {
+ for range replicas[curVector] {
tr.SubmitSuccess()
}
}
diff --git a/pkg/services/session/storage/persistent/executor_test.go b/pkg/services/session/storage/persistent/executor_test.go
index 39cb14359..124d36930 100644
--- a/pkg/services/session/storage/persistent/executor_test.go
+++ b/pkg/services/session/storage/persistent/executor_test.go
@@ -39,7 +39,7 @@ func TestTokenStore(t *testing.T) {
tokens := make([]tok, 0, tokenNumber)
- for i := 0; i < tokenNumber; i++ {
+ for i := range tokenNumber {
req.SetExpiration(uint64(i))
res, err := ts.Create(context.Background(), req)
diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go
index 677431889..95bdda34b 100644
--- a/pkg/services/tree/getsubtree_test.go
+++ b/pkg/services/tree/getsubtree_test.go
@@ -62,7 +62,7 @@ func TestGetSubTree(t *testing.T) {
loop:
for i := 1; i < len(acc.seen); i++ {
parent := acc.seen[i].Body.ParentId
- for j := 0; j < i; j++ {
+ for j := range i {
if acc.seen[j].Body.NodeId[0] == parent[0] {
continue loop
}
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index 346198b3c..95c8f8013 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -133,7 +133,7 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
}
func (s *Service) replicateLoop(ctx context.Context) {
- for i := 0; i < s.replicatorWorkerCount; i++ {
+ for range s.replicatorWorkerCount {
go s.replicationWorker(ctx)
go s.localReplicationWorker(ctx)
}
diff --git a/pkg/util/sync/key_locker_test.go b/pkg/util/sync/key_locker_test.go
index 3b3e6a694..259064ecf 100644
--- a/pkg/util/sync/key_locker_test.go
+++ b/pkg/util/sync/key_locker_test.go
@@ -13,7 +13,7 @@ func TestKeyLocker(t *testing.T) {
taken := false
eg, _ := errgroup.WithContext(context.Background())
keyLocker := NewKeyLocker[int]()
- for i := 0; i < 100; i++ {
+ for range 100 {
eg.Go(func() error {
keyLocker.Lock(0)
defer keyLocker.Unlock(0)
From a4fb7f085b4cd2c5d7bb2ec91e6e626238dead54 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Tue, 3 Sep 2024 11:39:02 +0300
Subject: [PATCH 070/705] [#1348] go.mod: Update api-go and sdk-go
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-cli/internal/client/client.go | 26 -----
cmd/frostfs-cli/modules/container/get_eacl.go | 68 -------------
cmd/frostfs-cli/modules/container/root.go | 2 -
cmd/frostfs-cli/modules/util/ape.go | 6 --
go.mod | 4 +-
go.sum | 8 +-
.../transport/container/grpc/service.go | 15 ---
pkg/services/container/ape.go | 12 ---
pkg/services/container/ape_test.go | 95 -------------------
pkg/services/container/audit.go | 11 ---
pkg/services/container/executor.go | 14 ---
pkg/services/container/morph/executor.go | 37 --------
pkg/services/container/server.go | 1 -
pkg/services/container/sign.go | 9 --
14 files changed, 6 insertions(+), 302 deletions(-)
delete mode 100644 cmd/frostfs-cli/modules/container/get_eacl.go
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
index 57bcf5620..03a987a57 100644
--- a/cmd/frostfs-cli/internal/client/client.go
+++ b/cmd/frostfs-cli/internal/client/client.go
@@ -17,7 +17,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -192,31 +191,6 @@ func DeleteContainer(ctx context.Context, prm DeleteContainerPrm) (res DeleteCon
return
}
-// EACLPrm groups parameters of EACL operation.
-type EACLPrm struct {
- Client *client.Client
- ClientParams client.PrmContainerEACL
-}
-
-// EACLRes groups the resulting values of EACL operation.
-type EACLRes struct {
- cliRes *client.ResContainerEACL
-}
-
-// EACL returns requested eACL table.
-func (x EACLRes) EACL() eacl.Table {
- return x.cliRes.Table()
-}
-
-// EACL reads eACL table from FrostFS by container ID.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func EACL(ctx context.Context, prm EACLPrm) (res EACLRes, err error) {
- res.cliRes, err = prm.Client.ContainerEACL(ctx, prm.ClientParams)
-
- return
-}
-
// NetworkInfoPrm groups parameters of NetworkInfo operation.
type NetworkInfoPrm struct {
Client *client.Client
diff --git a/cmd/frostfs-cli/modules/container/get_eacl.go b/cmd/frostfs-cli/modules/container/get_eacl.go
deleted file mode 100644
index 4ed1c82e1..000000000
--- a/cmd/frostfs-cli/modules/container/get_eacl.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package container
-
-import (
- "os"
-
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "github.com/spf13/cobra"
-)
-
-var getExtendedACLCmd = &cobra.Command{
- Use: "get-eacl",
- Short: "Get extended ACL table of container",
- Long: `Get extended ACL table of container`,
- Run: func(cmd *cobra.Command, _ []string) {
- id := parseContainerID(cmd)
- pk := key.GetOrGenerate(cmd)
- cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
-
- eaclPrm := internalclient.EACLPrm{
- Client: cli,
- ClientParams: client.PrmContainerEACL{
- ContainerID: &id,
- },
- }
-
- res, err := internalclient.EACL(cmd.Context(), eaclPrm)
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- eaclTable := res.EACL()
-
- if containerPathTo == "" {
- cmd.Println("eACL: ")
- common.PrettyPrintJSON(cmd, &eaclTable, "eACL")
-
- return
- }
-
- var data []byte
-
- if containerJSON {
- data, err = eaclTable.MarshalJSON()
- commonCmd.ExitOnErr(cmd, "can't encode to JSON: %w", err)
- } else {
- data, err = eaclTable.Marshal()
- commonCmd.ExitOnErr(cmd, "can't encode to binary: %w", err)
- }
-
- cmd.Println("dumping data to file:", containerPathTo)
-
- err = os.WriteFile(containerPathTo, data, 0o644)
- commonCmd.ExitOnErr(cmd, "could not write eACL to file: %w", err)
- },
-}
-
-func initContainerGetEACLCmd() {
- commonflags.Init(getExtendedACLCmd)
-
- flags := getExtendedACLCmd.Flags()
-
- flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
- flags.StringVar(&containerPathTo, "to", "", "Path to dump encoded container (default: binary encoded)")
- flags.BoolVar(&containerJSON, commonflags.JSON, false, "Encode EACL table in json format")
-}
diff --git a/cmd/frostfs-cli/modules/container/root.go b/cmd/frostfs-cli/modules/container/root.go
index d5f0fd776..2da21e767 100644
--- a/cmd/frostfs-cli/modules/container/root.go
+++ b/cmd/frostfs-cli/modules/container/root.go
@@ -25,7 +25,6 @@ func init() {
deleteContainerCmd,
listContainerObjectsCmd,
getContainerInfoCmd,
- getExtendedACLCmd,
containerNodesCmd,
policyPlaygroundCmd,
}
@@ -37,7 +36,6 @@ func init() {
initContainerDeleteCmd()
initContainerListObjectsCmd()
initContainerInfoCmd()
- initContainerGetEACLCmd()
initContainerNodesCmd()
initContainerPolicyPlaygroundCmd()
diff --git a/cmd/frostfs-cli/modules/util/ape.go b/cmd/frostfs-cli/modules/util/ape.go
index 9af57434a..73c368510 100644
--- a/cmd/frostfs-cli/modules/util/ape.go
+++ b/cmd/frostfs-cli/modules/util/ape.go
@@ -258,10 +258,6 @@ func parseAction(lexeme string) ([]string, bool, error) {
return []string{nativeschema.MethodDeleteContainer}, false, nil
case "container.get":
return []string{nativeschema.MethodGetContainer}, false, nil
- case "container.setcontainereacl":
- return []string{nativeschema.MethodSetContainerEACL}, false, nil
- case "container.getcontainereacl":
- return []string{nativeschema.MethodGetContainerEACL}, false, nil
case "container.list":
return []string{nativeschema.MethodListContainers}, false, nil
case "container.*":
@@ -269,8 +265,6 @@ func parseAction(lexeme string) ([]string, bool, error) {
nativeschema.MethodPutContainer,
nativeschema.MethodDeleteContainer,
nativeschema.MethodGetContainer,
- nativeschema.MethodSetContainerEACL,
- nativeschema.MethodGetContainerEACL,
nativeschema.MethodListContainers,
}, false, nil
default:
diff --git a/go.mod b/go.mod
index 358370201..19bf7852f 100644
--- a/go.mod
+++ b/go.mod
@@ -4,12 +4,12 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
diff --git a/go.sum b/go.sum
index be82bff70..8ebd59157 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61 h1:bw9EVGWnfY9awFb5XYR52AGbzgg3o04gZF66yHob48c=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb h1:p9ByDsw+H6p6LyYSx8LKFtAG/oPKQpDVMNfjPqdevTw=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7 h1:9eZidZMT4tHOdc6GZRPlZR12IToKqHhUd5wzevdDUqo=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7/go.mod h1:VzVYcwo/eXjkdo5ktPdZeAE4fsnZX6zEun3g+5E2weo=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad h1:cJGK/bXSF/0KMr6zkIy06qekQhQRU7eYzh+lWdQ0U8w=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad/go.mod h1:I4GzeEnftO++N2JHQn9QR88eV0rjQkGqlUCNMnCz1CY=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go
index f0206dd5c..9fae22b45 100644
--- a/pkg/network/transport/container/grpc/service.go
+++ b/pkg/network/transport/container/grpc/service.go
@@ -80,18 +80,3 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con
return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil
}
-
-// GetExtendedACL converts gRPC GetExtendedACLRequest message and passes it to internal Container service.
-func (s *Server) GetExtendedACL(ctx context.Context, req *containerGRPC.GetExtendedACLRequest) (*containerGRPC.GetExtendedACLResponse, error) {
- getEACLReq := new(container.GetExtendedACLRequest)
- if err := getEACLReq.FromGRPCMessage(req); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.GetExtendedACL(ctx, getEACLReq)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*containerGRPC.GetExtendedACLResponse), nil
-}
diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go
index 6f8a8e0e6..d92ecf58b 100644
--- a/pkg/services/container/ape.go
+++ b/pkg/services/container/ape.go
@@ -102,18 +102,6 @@ func (ac *apeChecker) Get(ctx context.Context, req *container.GetRequest) (*cont
return ac.next.Get(ctx, req)
}
-func (ac *apeChecker) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.GetExtendedACL")
- defer span.End()
-
- if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(),
- nativeschema.MethodGetContainerEACL); err != nil {
- return nil, err
- }
-
- return ac.next.GetExtendedACL(ctx, req)
-}
-
func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List")
defer span.End()
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
index 68c1158a6..d6f9b75ef 100644
--- a/pkg/services/container/ape_test.go
+++ b/pkg/services/container/ape_test.go
@@ -49,7 +49,6 @@ func TestAPE(t *testing.T) {
t.Run("deny get container by user claim tag", testDenyGetContainerByUserClaimTag)
t.Run("deny get container by IP", testDenyGetContainerByIP)
t.Run("deny get container by group id", testDenyGetContainerByGroupID)
- t.Run("deny get container eACL for IR with session token", testDenyGetContainerEACLForIRSessionToken)
t.Run("deny put container for others with session token", testDenyPutContainerForOthersSessionToken)
t.Run("deny put container, read namespace from frostfsID", testDenyPutContainerReadNamespaceFromFrostfsID)
t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace)
@@ -663,95 +662,6 @@ func testDenyGetContainerByGroupID(t *testing.T) {
require.ErrorAs(t, err, &errAccessDenied)
}
-func testDenyGetContainerEACLForIRSessionToken(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{},
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainerEACL,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleIR,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := &container.GetExtendedACLRequest{}
- req.SetBody(&container.GetExtendedACLRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- sessionPK, err := keys.NewPrivateKey()
- require.NoError(t, err)
- sToken := sessiontest.ContainerSigned()
- sToken.ApplyOnlyTo(contID)
- require.NoError(t, sToken.Sign(sessionPK.PrivateKey))
- var sTokenV2 session.Token
- sToken.WriteToV2(&sTokenV2)
- metaHeader := new(session.RequestMetaHeader)
- metaHeader.SetSessionToken(&sTokenV2)
- req.SetMetaHeader(metaHeader)
-
- ir.keys = append(ir.keys, sessionPK.PublicKey().Bytes())
-
- resp, err := apeSrv.GetExtendedACL(context.Background(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
-}
-
func testDenyPutContainerForOthersSessionToken(t *testing.T) {
t.Parallel()
srv := &srvStub{
@@ -1164,11 +1074,6 @@ func (s *srvStub) Get(context.Context, *container.GetRequest) (*container.GetRes
return &container.GetResponse{}, nil
}
-func (s *srvStub) GetExtendedACL(context.Context, *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- s.calls["GetExtendedACL"]++
- return &container.GetExtendedACLResponse{}, nil
-}
-
func (s *srvStub) List(context.Context, *container.ListRequest) (*container.ListResponse, error) {
s.calls["List"]++
return &container.ListResponse{}, nil
diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go
index 34fd5923f..b257272f5 100644
--- a/pkg/services/container/audit.go
+++ b/pkg/services/container/audit.go
@@ -52,17 +52,6 @@ func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*con
return res, err
}
-// GetExtendedACL implements Server.
-func (a *auditService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- res, err := a.next.GetExtendedACL(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
- audit.LogRequest(a.log, container_grpc.ContainerService_GetExtendedACL_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
- return res, err
-}
-
// List implements Server.
func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
res, err := a.next.List(ctx, req)
diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go
index b64963e25..0917e3bd0 100644
--- a/pkg/services/container/executor.go
+++ b/pkg/services/container/executor.go
@@ -14,7 +14,6 @@ type ServiceExecutor interface {
Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error)
Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error)
List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error)
- GetExtendedACL(context.Context, *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error)
}
type executorSvc struct {
@@ -94,16 +93,3 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co
s.respSvc.SetMeta(resp)
return resp, nil
}
-
-func (s *executorSvc) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- respBody, err := s.exec.GetExtendedACL(ctx, req.GetBody())
- if err != nil {
- return nil, fmt.Errorf("could not execute GetEACL request: %w", err)
- }
-
- resp := new(container.GetExtendedACLResponse)
- resp.SetBody(respBody)
-
- s.respSvc.SetMeta(resp)
- return resp, nil
-}
diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go
index 57dac32f0..05d8749cf 100644
--- a/pkg/services/container/morph/executor.go
+++ b/pkg/services/container/morph/executor.go
@@ -201,40 +201,3 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody)
return res, nil
}
-
-func (s *morphExecutor) GetExtendedACL(_ context.Context, body *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error) {
- idV2 := body.GetContainerID()
- if idV2 == nil {
- return nil, errors.New("missing container ID")
- }
-
- var id cid.ID
-
- err := id.ReadFromV2(*idV2)
- if err != nil {
- return nil, fmt.Errorf("invalid container ID: %w", err)
- }
-
- eaclInfo, err := s.rdr.GetEACL(id)
- if err != nil {
- return nil, err
- }
-
- var sigV2 refs.Signature
- eaclInfo.Signature.WriteToV2(&sigV2)
-
- var tokV2 *sessionV2.Token
-
- if eaclInfo.Session != nil {
- tokV2 = new(sessionV2.Token)
-
- eaclInfo.Session.WriteToV2(tokV2)
- }
-
- res := new(container.GetExtendedACLResponseBody)
- res.SetEACL(eaclInfo.Value.ToV2())
- res.SetSignature(&sigV2)
- res.SetSessionToken(tokV2)
-
- return res, nil
-}
diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go
index d714d7f02..a19d83c56 100644
--- a/pkg/services/container/server.go
+++ b/pkg/services/container/server.go
@@ -12,5 +12,4 @@ type Server interface {
Get(context.Context, *container.GetRequest) (*container.GetResponse, error)
Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error)
List(context.Context, *container.ListRequest) (*container.ListResponse, error)
- GetExtendedACL(context.Context, *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error)
}
diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go
index 62aa3fe27..f7f5d6486 100644
--- a/pkg/services/container/sign.go
+++ b/pkg/services/container/sign.go
@@ -56,12 +56,3 @@ func (s *signService) List(ctx context.Context, req *container.ListRequest) (*co
resp, err := util.EnsureNonNilResponse(s.svc.List(ctx, req))
return resp, s.sigSvc.SignResponse(resp, err)
}
-
-func (s *signService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- if err := s.sigSvc.VerifyRequest(req); err != nil {
- resp := new(container.GetExtendedACLResponse)
- return resp, s.sigSvc.SignResponse(resp, err)
- }
- resp, err := util.EnsureNonNilResponse(s.svc.GetExtendedACL(ctx, req))
- return resp, s.sigSvc.SignResponse(resp, err)
-}
From b9043433a098ae242c559a79afe6f1ced5068e68 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 15 Jul 2024 14:07:32 +0300
Subject: [PATCH 071/705] [#1223] scripts: Add script to populate metabase
Signed-off-by: Aleksey Savchuk
---
.../populate-metabase/internal/generate.go | 132 +++++++++
.../populate-metabase/internal/populate.go | 263 ++++++++++++++++++
scripts/populate-metabase/main.go | 159 +++++++++++
3 files changed, 554 insertions(+)
create mode 100644 scripts/populate-metabase/internal/generate.go
create mode 100644 scripts/populate-metabase/internal/populate.go
create mode 100644 scripts/populate-metabase/main.go
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
new file mode 100644
index 000000000..d2004b673
--- /dev/null
+++ b/scripts/populate-metabase/internal/generate.go
@@ -0,0 +1,132 @@
+package internal
+
+import (
+ "crypto/sha256"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "golang.org/x/exp/rand"
+)
+
+func GeneratePayloadPool(count uint, size uint) [][]byte {
+ pool := [][]byte{}
+ for i := uint(0); i < count; i++ {
+ payload := make([]byte, size)
+ _, _ = rand.Read(payload)
+
+ pool = append(pool, payload)
+ }
+ return pool
+}
+
+func GenerateAttributePool(count uint) []objectSDK.Attribute {
+ pool := []objectSDK.Attribute{}
+ for i := uint(0); i < count; i++ {
+ for j := uint(0); j < count; j++ {
+ attr := *objectSDK.NewAttribute()
+ attr.SetKey(fmt.Sprintf("key%d", i))
+ attr.SetValue(fmt.Sprintf("value%d", j))
+ pool = append(pool, attr)
+ }
+ }
+ return pool
+}
+
+func GenerateOwnerPool(count uint) []user.ID {
+ pool := []user.ID{}
+ for i := uint(0); i < count; i++ {
+ pool = append(pool, usertest.ID())
+ }
+ return pool
+}
+
+type ObjectOption func(obj *objectSDK.Object)
+
+func GenerateObject(options ...ObjectOption) *objectSDK.Object {
+ var ver version.Version
+ ver.SetMajor(2)
+ ver.SetMinor(1)
+
+ payload := make([]byte, 0)
+
+ var csum checksum.Checksum
+ csum.SetSHA256(sha256.Sum256(payload))
+
+ var csumTZ checksum.Checksum
+ csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+ obj := objectSDK.New()
+ obj.SetID(oidtest.ID())
+ obj.SetOwnerID(usertest.ID())
+ obj.SetContainerID(cidtest.ID())
+
+ header := objecttest.Object().GetECHeader()
+ header.SetParent(oidtest.ID())
+ obj.SetECHeader(header)
+
+ obj.SetVersion(&ver)
+ obj.SetPayload(payload)
+ obj.SetPayloadSize(uint64(len(payload)))
+ obj.SetPayloadChecksum(csum)
+ obj.SetPayloadHomomorphicHash(csumTZ)
+
+ for _, option := range options {
+ option(obj)
+ }
+
+ return obj
+}
+
+func WithContainerID(cid cid.ID) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ obj.SetContainerID(cid)
+ }
+}
+
+func WithType(typ objectSDK.Type) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ obj.SetType(typ)
+ }
+}
+
+func WithPayloadFromPool(pool [][]byte) ObjectOption {
+ payload := pool[rand.Intn(len(pool))]
+
+ var csum checksum.Checksum
+ csum.SetSHA256(sha256.Sum256(payload))
+
+ var csumTZ checksum.Checksum
+ csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+ return func(obj *objectSDK.Object) {
+ obj.SetPayload(payload)
+ obj.SetPayloadSize(uint64(len(payload)))
+ obj.SetPayloadChecksum(csum)
+ obj.SetPayloadHomomorphicHash(csumTZ)
+ }
+}
+
+func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ attrs := []objectSDK.Attribute{}
+ for i := uint(0); i < count; i++ {
+ attrs = append(attrs, pool[rand.Intn(len(pool))])
+ }
+ obj.SetAttributes(attrs...)
+ }
+}
+
+func WithOwnerIDFromPool(pool []user.ID) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ obj.SetOwnerID(pool[rand.Intn(len(pool))])
+ }
+}
diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go
new file mode 100644
index 000000000..390c1cdc0
--- /dev/null
+++ b/scripts/populate-metabase/internal/populate.go
@@ -0,0 +1,263 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "sync"
+
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "golang.org/x/sync/errgroup"
+)
+
+type EpochState struct{}
+
+func (s EpochState) CurrentEpoch() uint64 {
+ return 0
+}
+
+func PopulateWithObjects(
+ ctx context.Context,
+ db *meta.DB,
+ group *errgroup.Group,
+ count uint,
+ factory func() *objectSDK.Object,
+) {
+ digits := "0123456789"
+
+ for i := uint(0); i < count; i++ {
+ obj := factory()
+
+ id := []byte(fmt.Sprintf(
+ "%c/%c/%c",
+ digits[rand.Int()%len(digits)],
+ digits[rand.Int()%len(digits)],
+ digits[rand.Int()%len(digits)],
+ ))
+
+ prm := meta.PutPrm{}
+ prm.SetObject(obj)
+ prm.SetStorageID(id)
+
+ group.Go(func() error {
+ if _, err := db.Put(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't put an object: %w", err)
+ }
+ return nil
+ })
+ }
+}
+
+func PopulateWithBigObjects(
+ ctx context.Context,
+ db *meta.DB,
+ group *errgroup.Group,
+ count uint,
+ factory func() *objectSDK.Object,
+) {
+ for i := uint(0); i < count; i++ {
+ group.Go(func() error {
+ if err := populateWithBigObject(ctx, db, factory); err != nil {
+ return fmt.Errorf("couldn't put a big object: %w", err)
+ }
+ return nil
+ })
+ }
+}
+
+func populateWithBigObject(
+ ctx context.Context,
+ db *meta.DB,
+ factory func() *objectSDK.Object,
+) error {
+ t := &target{db: db}
+
+ pk, _ := keys.NewPrivateKey()
+ p := transformer.NewPayloadSizeLimiter(transformer.Params{
+ Key: &pk.PrivateKey,
+ NextTargetInit: func() transformer.ObjectWriter { return t },
+ NetworkState: EpochState{},
+ MaxSize: 10,
+ })
+
+ obj := factory()
+ payload := make([]byte, 30)
+
+ err := p.WriteHeader(ctx, obj)
+ if err != nil {
+ return err
+ }
+
+ _, err = p.Write(ctx, payload)
+ if err != nil {
+ return err
+ }
+
+ _, err = p.Close(ctx)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type target struct {
+ db *meta.DB
+}
+
+func (t *target) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+ prm := meta.PutPrm{}
+ prm.SetObject(obj)
+
+ _, err := t.db.Put(ctx, prm)
+ return err
+}
+
+func PopulateGraveyard(
+ ctx context.Context,
+ db *meta.DB,
+ group *errgroup.Group,
+ workBufferSize int,
+ count uint,
+ factory func() *objectSDK.Object,
+) {
+ ts := factory()
+ ts.SetType(objectSDK.TypeTombstone)
+
+ prm := meta.PutPrm{}
+ prm.SetObject(ts)
+
+ group.Go(func() error {
+ if _, err := db.Put(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't put a tombstone object: %w", err)
+ }
+ return nil
+ })
+
+ cID, _ := ts.ContainerID()
+ oID, _ := ts.ID()
+
+ var tsAddr oid.Address
+
+ tsAddr.SetContainer(cID)
+ tsAddr.SetObject(oID)
+
+ addrs := make(chan oid.Address, workBufferSize)
+
+ go func() {
+ defer close(addrs)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(int(count))
+
+ for i := uint(0); i < count; i++ {
+ obj := factory()
+
+ prm := meta.PutPrm{}
+ prm.SetObject(obj)
+
+ group.Go(func() error {
+ defer wg.Done()
+
+ if _, err := db.Put(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't put an object: %w", err)
+ }
+
+ cID, _ := obj.ContainerID()
+ oID, _ := obj.ID()
+
+ var addr oid.Address
+ addr.SetContainer(cID)
+ addr.SetObject(oID)
+
+ addrs <- addr
+ return nil
+ })
+ }
+ wg.Wait()
+ }()
+
+ go func() {
+ for addr := range addrs {
+ prm := meta.InhumePrm{}
+ prm.SetAddresses(addr)
+ prm.SetTombstoneAddress(tsAddr)
+
+ group.Go(func() error {
+ if _, err := db.Inhume(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't inhume an object: %w", err)
+ }
+ return nil
+ })
+ }
+ }()
+}
+
+func PopulateLocked(
+ ctx context.Context,
+ db *meta.DB,
+ group *errgroup.Group,
+ workBufferSize int,
+ count uint,
+ factory func() *objectSDK.Object,
+) {
+ locker := factory()
+ locker.SetType(objectSDK.TypeLock)
+
+ prm := meta.PutPrm{}
+ prm.SetObject(locker)
+
+ group.Go(func() error {
+ if _, err := db.Put(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't put a locker object: %w", err)
+ }
+ return nil
+ })
+
+ ids := make(chan oid.ID, workBufferSize)
+
+ go func() {
+ defer close(ids)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(int(count))
+
+ for i := uint(0); i < count; i++ {
+ defer wg.Done()
+
+ obj := factory()
+
+ prm := meta.PutPrm{}
+ prm.SetObject(obj)
+
+ group.Go(func() error {
+ if _, err := db.Put(ctx, prm); err != nil {
+ return fmt.Errorf("couldn't put an object: %w", err)
+ }
+
+ id, _ := obj.ID()
+ ids <- id
+ return nil
+ })
+ }
+ wg.Wait()
+ }()
+
+ go func() {
+ for id := range ids {
+ lockerCID, _ := locker.ContainerID()
+ lockerOID, _ := locker.ID()
+
+ group.Go(func() error {
+ if err := db.Lock(ctx, lockerCID, lockerOID, []oid.ID{id}); err != nil {
+ return fmt.Errorf("couldn't lock an object: %w", err)
+ }
+ return nil
+ })
+ }
+ }()
+}
diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go
new file mode 100644
index 000000000..2bc7a5553
--- /dev/null
+++ b/scripts/populate-metabase/main.go
@@ -0,0 +1,159 @@
+package main
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/scripts/populate-metabase/internal"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "golang.org/x/sync/errgroup"
+)
+
+var (
+ path string
+ force bool
+ jobs uint
+
+ numContainers,
+ numObjects,
+ numAttributesPerObj,
+ numOwners,
+ numPayloads,
+ numAttributes uint
+)
+
+func main() {
+ flag.StringVar(&path, "path", "", "Path to metabase")
+ flag.BoolVar(&force, "force", false, "Rewrite existing database")
+ flag.UintVar(&jobs, "j", 10000, "Number of jobs to run")
+
+ flag.UintVar(&numContainers, "containers", 0, "Number of containers to be created")
+ flag.UintVar(&numObjects, "objects", 0, "Number of objects per container")
+ flag.UintVar(&numAttributesPerObj, "attributes", 0, "Number of attributes per object")
+
+ flag.UintVar(&numOwners, "distinct-owners", 10, "Number of distinct owners to be used")
+ flag.UintVar(&numPayloads, "distinct-payloads", 10, "Number of distinct payloads to be used")
+ flag.UintVar(&numAttributes, "distinct-attributes", 10, "Number of distinct attributes to be used")
+
+ flag.Parse()
+
+ exitIf(numPayloads == 0, "must have payloads\n")
+ exitIf(numAttributes == 0, "must have attributes\n")
+ exitIf(numOwners == 0, "must have owners\n")
+ exitIf(len(path) == 0, "path to metabase not specified\n")
+ exitIf(
+ numAttributesPerObj > numAttributes,
+ "object can't have more attributes than available\n",
+ )
+
+ info, err := os.Stat(path)
+ exitIf(
+ err != nil && !errors.Is(err, os.ErrNotExist),
+ "couldn't get path info: %s\n", err,
+ )
+
+ // Path exists.
+ if err == nil {
+ exitIf(info.IsDir(), "path is a directory\n")
+ exitIf(!force, "couldn't rewrite existing file, use '-force' flag\n")
+
+ err = os.Remove(path)
+ exitIf(err != nil, "couldn't remove existing file: %s\n", err)
+ }
+
+ err = populate()
+ exitIf(err != nil, "couldn't populate the metabase: %s\n", err)
+}
+
+func getObjectFactory(opts ...internal.ObjectOption) func() *objectSDK.Object {
+ return func() *objectSDK.Object {
+ return internal.GenerateObject(opts...)
+ }
+}
+
+func populate() (err error) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ db := meta.New([]meta.Option{
+ meta.WithPath(path),
+ meta.WithPermissions(0o600),
+ meta.WithEpochState(internal.EpochState{}),
+ }...)
+
+ if err = db.Open(ctx, mode.ReadWrite); err != nil {
+ return fmt.Errorf("couldn't open the metabase: %w", err)
+ }
+ defer func() {
+ if errOnClose := db.Close(); errOnClose != nil {
+ err = errors.Join(
+ err,
+ fmt.Errorf("couldn't close the metabase: %w", db.Close()),
+ )
+ }
+ }()
+
+ if err = db.Init(); err != nil {
+ return fmt.Errorf("couldn't init the metabase: %w", err)
+ }
+
+ payloads := internal.GeneratePayloadPool(numPayloads, 32)
+ attributes := internal.GenerateAttributePool(numAttributes)
+ owners := internal.GenerateOwnerPool(numOwners)
+
+ types := []objectSDK.Type{
+ objectSDK.TypeRegular,
+ objectSDK.TypeLock,
+ objectSDK.TypeTombstone,
+ }
+
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.SetLimit(int(jobs))
+
+ for i := uint(0); i < numContainers; i++ {
+ cid := cidtest.ID()
+
+ for _, typ := range types {
+ internal.PopulateWithObjects(ctx, db, eg, numObjects, getObjectFactory(
+ internal.WithContainerID(cid),
+ internal.WithType(typ),
+ internal.WithPayloadFromPool(payloads),
+ internal.WithOwnerIDFromPool(owners),
+ internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+ ))
+ }
+ internal.PopulateWithBigObjects(ctx, db, eg, numObjects, getObjectFactory(
+ internal.WithContainerID(cid),
+ internal.WithType(objectSDK.TypeRegular),
+ internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+ internal.WithOwnerIDFromPool(owners),
+ ))
+ internal.PopulateGraveyard(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
+ internal.WithContainerID(cid),
+ internal.WithType(objectSDK.TypeRegular),
+ internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+ internal.WithOwnerIDFromPool(owners),
+ ))
+ internal.PopulateLocked(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
+ internal.WithContainerID(cid),
+ internal.WithType(objectSDK.TypeRegular),
+ internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+ internal.WithOwnerIDFromPool(owners),
+ ))
+ }
+
+ return eg.Wait()
+}
+
+func exitIf(cond bool, format string, args ...any) {
+ if cond {
+ fmt.Fprintf(os.Stderr, format, args...)
+ os.Exit(1)
+ }
+}
From 1ae86f35a8d95a3f2258eaefb772482d0af873f6 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 18 Jul 2024 18:26:11 +0300
Subject: [PATCH 072/705] [#1223] lens/tui: Add metabase schema
Signed-off-by: Aleksey Savchuk
---
.../internal/schema/common/format.go | 43 +++
.../internal/schema/common/raw.go | 29 ++
.../internal/schema/common/schema.go | 81 ++++++
.../schema/metabase/buckets/detailed.go | 29 ++
.../schema/metabase/buckets/filter.go | 81 ++++++
.../schema/metabase/buckets/parsers.go | 111 ++++++++
.../schema/metabase/buckets/prefix.go | 53 ++++
.../schema/metabase/buckets/string.go | 48 ++++
.../internal/schema/metabase/buckets/types.go | 166 ++++++++++++
.../internal/schema/metabase/parser.go | 29 ++
.../schema/metabase/records/detailed.go | 65 +++++
.../schema/metabase/records/filter.go | 145 ++++++++++
.../schema/metabase/records/parsers.go | 251 ++++++++++++++++++
.../schema/metabase/records/string.go | 135 ++++++++++
.../internal/schema/metabase/records/types.go | 82 ++++++
.../internal/schema/metabase/records/util.go | 20 ++
go.mod | 5 +-
go.sum | 9 +
18 files changed, 1381 insertions(+), 1 deletion(-)
create mode 100644 cmd/frostfs-lens/internal/schema/common/format.go
create mode 100644 cmd/frostfs-lens/internal/schema/common/raw.go
create mode 100644 cmd/frostfs-lens/internal/schema/common/schema.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/parser.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/filter.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/string.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/types.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/util.go
diff --git a/cmd/frostfs-lens/internal/schema/common/format.go b/cmd/frostfs-lens/internal/schema/common/format.go
new file mode 100644
index 000000000..4ed7e96f2
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/common/format.go
@@ -0,0 +1,43 @@
+package common
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/gdamore/tcell/v2"
+)
+
+type FormatOptions struct {
+ Color tcell.Color
+
+ Bold,
+ Italic,
+ Underline,
+ StrikeThrough bool
+}
+
+func Format(s string, opts FormatOptions) string {
+	var boldTag, italicTag, underlineTag, strikeThroughTag string
+
+	if opts.Bold {
+		boldTag = "b"
+	}
+	if opts.Italic {
+		italicTag = "i"
+	}
+	if opts.Underline {
+		underlineTag = "u"
+	}
+	if opts.StrikeThrough {
+		strikeThroughTag = "s"
+	}
+
+	attrs := boldTag + italicTag + underlineTag + strikeThroughTag
+	color := strconv.FormatInt(int64(opts.Color.Hex()), 16)
+
+	return fmt.Sprintf("[#%06s::%s]%s[-::-]", color, attrs, s)
+}
+
+func FormatSimple(s string, c tcell.Color) string {
+ return Format(s, FormatOptions{Color: c})
+}
diff --git a/cmd/frostfs-lens/internal/schema/common/raw.go b/cmd/frostfs-lens/internal/schema/common/raw.go
new file mode 100644
index 000000000..0990e24c3
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/common/raw.go
@@ -0,0 +1,29 @@
+package common
+
+import (
+ "github.com/davecgh/go-spew/spew"
+ "github.com/gdamore/tcell/v2"
+ "github.com/mr-tron/base58"
+)
+
+type RawEntry struct {
+ key, value []byte
+}
+
+var RawParser Parser = rawParser
+
+func rawParser(key, value []byte) (SchemaEntry, Parser, error) {
+ return &RawEntry{key: key, value: value}, rawParser, nil
+}
+
+func (r *RawEntry) String() string {
+ return FormatSimple(base58.Encode(r.key), tcell.ColorRed)
+}
+
+func (r *RawEntry) DetailedString() string {
+ return spew.Sdump(r)
+}
+
+func (r *RawEntry) Filter(string, any) FilterResult {
+ return No
+}
diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go
new file mode 100644
index 000000000..9bad19032
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/common/schema.go
@@ -0,0 +1,81 @@
+package common
+
+import (
+ "errors"
+ "fmt"
+)
+
+type FilterResult byte
+
+const (
+ No FilterResult = iota
+ Maybe
+ Yes
+)
+
+func IfThenElse(condition bool, onSuccess, onFailure FilterResult) FilterResult {
+ var res FilterResult
+ if condition {
+ res = onSuccess
+ } else {
+ res = onFailure
+ }
+ return res
+}
+
+type SchemaEntry interface {
+ String() string
+ DetailedString() string
+ Filter(typ string, val any) FilterResult
+}
+
+type (
+ Parser func(key, value []byte) (SchemaEntry, Parser, error)
+ FallbackParser func(key, value []byte) (SchemaEntry, Parser)
+)
+
+func Any(parsers ...Parser) Parser {
+ return func(key, value []byte) (SchemaEntry, Parser, error) {
+ var errs error
+ for _, parser := range parsers {
+ ret, next, err := parser(key, value)
+ if err == nil {
+ return ret, next, nil
+ }
+ errs = errors.Join(errs, err)
+ }
+ return nil, nil, fmt.Errorf("no parser succeeded: %w", errs)
+ }
+}
+
+func WithFallback(parser Parser, fallback FallbackParser) Parser {
+ if parser == nil {
+ return fallback.ToParser()
+ }
+ return func(key, value []byte) (SchemaEntry, Parser, error) {
+ entry, next, err := parser(key, value)
+ if err == nil {
+ return entry, WithFallback(next, fallback), nil
+ }
+ return fallback.ToParser()(key, value)
+ }
+}
+
+func (fp FallbackParser) ToParser() Parser {
+ return func(key, value []byte) (SchemaEntry, Parser, error) {
+ entry, next := fp(key, value)
+ return entry, next, nil
+ }
+}
+
+func (p Parser) ToFallbackParser() FallbackParser {
+ return func(key, value []byte) (SchemaEntry, Parser) {
+ entry, next, err := p(key, value)
+ if err != nil {
+ panic(fmt.Errorf(
+ "couldn't use that parser as a fallback parser, it returned an error: %w", err,
+ ))
+ }
+ return entry, next
+ }
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go
new file mode 100644
index 000000000..6a08a723e
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go
@@ -0,0 +1,29 @@
+package buckets
+
+import (
+ "github.com/davecgh/go-spew/spew"
+)
+
+func (b *PrefixBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *PrefixContainerBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *UserBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *ContainerBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *UserAttributeKeyBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *UserAttributeValueBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go
new file mode 100644
index 000000000..891c4004f
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go
@@ -0,0 +1,81 @@
+package buckets
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+)
+
+func (b *PrefixBucket) Filter(typ string, _ any) common.FilterResult {
+ switch typ {
+ case "cid":
+ return b.resolvers.cidResolver(false)
+ case "oid":
+ return b.resolvers.oidResolver(false)
+ default:
+ return common.No
+ }
+}
+
+func (b *PrefixContainerBucket) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return b.resolvers.cidResolver(b.id.Equals(id))
+ case "oid":
+ return b.resolvers.oidResolver(false)
+ default:
+ return common.No
+ }
+}
+
+func (b *UserBucket) Filter(typ string, _ any) common.FilterResult {
+ switch typ {
+ case "cid":
+ return b.resolvers.cidResolver(false)
+ case "oid":
+ return b.resolvers.oidResolver(false)
+ default:
+ return common.No
+ }
+}
+
+func (b *ContainerBucket) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return b.resolvers.cidResolver(b.id.Equals(id))
+ case "oid":
+ return b.resolvers.oidResolver(false)
+ default:
+ return common.No
+ }
+}
+
+func (b *UserAttributeKeyBucket) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(b.id.Equals(id), common.Yes, common.No)
+ case "oid":
+ return common.Maybe
+ case "key":
+ key := val.(string)
+ return common.IfThenElse(b.key == key, common.Yes, common.No)
+ case "value":
+ return common.Maybe
+ default:
+ return common.No
+ }
+}
+
+func (b *UserAttributeValueBucket) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ return common.Maybe
+ case "value":
+ value := val.(string)
+ return common.IfThenElse(b.value == value, common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
new file mode 100644
index 000000000..24cc0e52d
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
@@ -0,0 +1,111 @@
+package buckets
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/records"
+)
+
+var (
+ GraveyardParser = NewPrefixBucketParser(Graveyard, records.GraveyardRecordParser, Resolvers{
+ cidResolver: LenientResolver,
+ oidResolver: LenientResolver,
+ })
+
+ GarbageParser = NewPrefixBucketParser(Garbage, records.GarbageRecordParser, Resolvers{
+ cidResolver: LenientResolver,
+ oidResolver: LenientResolver,
+ })
+
+ ContainerVolumeParser = NewPrefixBucketParser(ContainerVolume, records.ContainerVolumeRecordParser, Resolvers{
+ cidResolver: LenientResolver,
+ oidResolver: StrictResolver,
+ })
+
+ LockedParser = NewPrefixBucketParser(
+ Locked,
+ NewContainerBucketParser(
+ records.LockedRecordParser,
+ Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ },
+ ),
+ Resolvers{
+ cidResolver: LenientResolver,
+ oidResolver: LenientResolver,
+ },
+ )
+
+ ShardInfoParser = NewPrefixBucketParser(ShardInfo, records.ShardInfoRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: StrictResolver,
+ })
+
+ PrimaryParser = NewPrefixContainerBucketParser(Primary, records.ObjectRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ LockersParser = NewPrefixContainerBucketParser(Lockers, records.ObjectRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ TombstoneParser = NewPrefixContainerBucketParser(Tombstone, records.ObjectRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ SmallParser = NewPrefixContainerBucketParser(Small, records.SmallRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ RootParser = NewPrefixContainerBucketParser(Root, records.RootRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ OwnerParser = NewPrefixContainerBucketParser(
+ Owner,
+ NewUserBucketParser(
+ records.OwnerRecordParser,
+ Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ },
+ ),
+ Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ },
+ )
+
+ UserAttributeParser = NewUserAttributeKeyBucketParser(
+ NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
+ )
+
+ PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: StrictResolver,
+ })
+
+ ParentParser = NewPrefixContainerBucketParser(Parent, records.ParentRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ SplitParser = NewPrefixContainerBucketParser(Split, records.SplitRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: StrictResolver,
+ })
+
+ ContainerCountersParser = NewPrefixBucketParser(ContainerCounters, records.ContainerCountersRecordParser, Resolvers{
+ cidResolver: LenientResolver,
+ oidResolver: StrictResolver,
+ })
+
+ ECInfoParser = NewPrefixContainerBucketParser(ECInfo, records.ECInfoRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
new file mode 100644
index 000000000..2fb122940
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
@@ -0,0 +1,53 @@
+package buckets
+
+type Prefix byte
+
+const (
+ Graveyard Prefix = iota
+ Garbage
+ ToMoveIt
+ ContainerVolume
+ Locked
+ ShardInfo
+ Primary
+ Lockers
+ _
+ Tombstone
+ Small
+ Root
+ Owner
+ UserAttribute
+ PayloadHash
+ Parent
+ Split
+ ContainerCounters
+ ECInfo
+)
+
+var prefixNames = map[Prefix]string{
+	Graveyard:         "Graveyard",
+	Garbage:           "Garbage",
+	ToMoveIt:          "To Move It",
+	ContainerVolume:   "Container Volume",
+	Locked:            "Locked",
+	ShardInfo:         "Shard Info",
+	Primary:           "Primary",
+	Lockers:           "Lockers",
+	Tombstone:         "Tombstone",
+	Small:             "Small",
+	Root:              "Root",
+	Owner:             "Owner",
+	UserAttribute:     "User Attribute",
+	PayloadHash:       "Payload Hash",
+	Parent:            "Parent",
+	Split:             "Split",
+	ContainerCounters: "Container Counters",
+	ECInfo:            "EC Info",
+}
+
+// String returns a human-readable bucket name for known prefixes,
+// or "Unknown Prefix" otherwise.
+func (p Prefix) String() string {
+	if s, ok := prefixNames[p]; ok {
+		return s
+	}
+	return "Unknown Prefix"
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
new file mode 100644
index 000000000..db90bddbd
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
@@ -0,0 +1,48 @@
+package buckets
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "github.com/gdamore/tcell/v2"
+)
+
+func (b *PrefixBucket) String() string {
+ return common.FormatSimple(
+ fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+ )
+}
+
+func (b *PrefixContainerBucket) String() string {
+ return fmt.Sprintf(
+ "%s CID %s",
+ common.FormatSimple(
+ fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+ ),
+ common.FormatSimple(b.id.String(), tcell.ColorAqua),
+ )
+}
+
+func (b *UserBucket) String() string {
+ return "UID " + common.FormatSimple(b.id.String(), tcell.ColorAqua)
+}
+
+func (b *ContainerBucket) String() string {
+ return "CID " + common.FormatSimple(b.id.String(), tcell.ColorAqua)
+}
+
+func (b *UserAttributeKeyBucket) String() string {
+ return fmt.Sprintf("%s CID %s ATTR-KEY %s",
+ common.FormatSimple(
+ fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+ ),
+ common.FormatSimple(
+ fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,
+ ),
+ common.FormatSimple(b.key, tcell.ColorAqua),
+ )
+}
+
+func (b *UserAttributeValueBucket) String() string {
+ return "ATTR-VALUE " + common.FormatSimple(b.value, tcell.ColorAqua)
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
new file mode 100644
index 000000000..82b47dd85
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
@@ -0,0 +1,166 @@
+package buckets
+
+import (
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/mr-tron/base58"
+)
+
+type (
+ PrefixBucket struct {
+ prefix Prefix
+ resolvers Resolvers
+ }
+
+ PrefixContainerBucket struct {
+ prefix Prefix
+ id cid.ID
+ resolvers Resolvers
+ }
+
+ ContainerBucket struct {
+ id cid.ID
+ resolvers Resolvers
+ }
+
+ UserBucket struct {
+ id user.ID
+ resolvers Resolvers
+ }
+
+ UserAttributeKeyBucket struct {
+ prefix Prefix
+ id cid.ID
+ key string
+ }
+
+ UserAttributeValueBucket struct {
+ value string
+ }
+)
+
+type (
+ FilterResolver = func(result bool) common.FilterResult
+
+ Resolvers struct {
+ cidResolver FilterResolver
+ oidResolver FilterResolver
+ }
+)
+
+var (
+ StrictResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.No) }
+ LenientResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.Maybe) }
+)
+
+var (
+ ErrNotBucket = errors.New("not a bucket")
+ ErrInvalidKeyLength = errors.New("invalid key length")
+ ErrInvalidValueLength = errors.New("invalid value length")
+ ErrInvalidPrefix = errors.New("invalid prefix")
+)
+
+func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ if len(key) != 1 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var b PrefixBucket
+ if b.prefix = Prefix(key[0]); b.prefix != prefix {
+ return nil, nil, ErrInvalidPrefix
+ }
+ b.resolvers = resolvers
+ return &b, next, nil
+ }
+}
+
+func NewPrefixContainerBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ if len(key) != 33 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var b PrefixContainerBucket
+ if b.prefix = Prefix(key[0]); b.prefix != prefix {
+ return nil, nil, ErrInvalidPrefix
+ }
+ if err := b.id.Decode(key[1:]); err != nil {
+ return nil, nil, err
+ }
+ b.resolvers = resolvers
+ return &b, next, nil
+ }
+}
+
+func NewUserBucketParser(next common.Parser, resolvers Resolvers) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ var b UserBucket
+ if err := b.id.DecodeString(base58.Encode(key)); err != nil {
+ return nil, nil, err
+ }
+ b.resolvers = resolvers
+ return &b, next, nil
+ }
+}
+
+func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ if len(key) != 32 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var b ContainerBucket
+ if err := b.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ b.resolvers = resolvers
+ return &b, next, nil
+ }
+}
+
+func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ if len(key) < 34 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var b UserAttributeKeyBucket
+ if b.prefix = Prefix(key[0]); b.prefix != UserAttribute {
+ return nil, nil, ErrInvalidPrefix
+ }
+ if err := b.id.Decode(key[1:33]); err != nil {
+ return nil, nil, err
+ }
+ b.key = string(key[33:])
+ return &b, next, nil
+ }
+}
+
+func NewUserAttributeValueBucketParser(next common.Parser) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ if len(key) == 0 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var b UserAttributeValueBucket
+ b.value = string(key)
+ return &b, next, nil
+ }
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/parser.go b/cmd/frostfs-lens/internal/schema/metabase/parser.go
new file mode 100644
index 000000000..ea095e207
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/parser.go
@@ -0,0 +1,29 @@
+package metabase
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
+)
+
+var MetabaseParser = common.WithFallback(
+ common.Any(
+ buckets.GraveyardParser,
+ buckets.GarbageParser,
+ buckets.ContainerVolumeParser,
+ buckets.LockedParser,
+ buckets.ShardInfoParser,
+ buckets.PrimaryParser,
+ buckets.LockersParser,
+ buckets.TombstoneParser,
+ buckets.SmallParser,
+ buckets.RootParser,
+ buckets.OwnerParser,
+ buckets.UserAttributeParser,
+ buckets.PayloadHashParser,
+ buckets.ParentParser,
+ buckets.SplitParser,
+ buckets.ContainerCountersParser,
+ buckets.ECInfoParser,
+ ),
+ common.RawParser.ToFallbackParser(),
+)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
new file mode 100644
index 000000000..2dda15b4f
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
@@ -0,0 +1,65 @@
+package records
+
+import (
+ "github.com/davecgh/go-spew/spew"
+)
+
+func (r *GraveyardRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *GarbageRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ContainerVolumeRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *LockedRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ShardInfoRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ObjectRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *SmallRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *RootRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *OwnerRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *UserAttributeRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *PayloadHashRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ParentRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *SplitRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ContainerCountersRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ECInfoRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
new file mode 100644
index 000000000..880a7a8ff
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
@@ -0,0 +1,145 @@
+package records
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+func (r *GraveyardRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.object.Container().Equals(id), common.Yes, common.No)
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.object.Object().Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *GarbageRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *ContainerVolumeRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *ShardInfoRecord) Filter(string, any) common.FilterResult {
+ return common.No
+}
+
+func (r *LockedRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *ObjectRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *SmallRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *RootRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *OwnerRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *UserAttributeRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *PayloadHashRecord) Filter(string, any) common.FilterResult {
+ return common.No
+}
+
+func (r *ParentRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.parent.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *SplitRecord) Filter(string, any) common.FilterResult {
+ return common.No
+}
+
+func (r *ContainerCountersRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
new file mode 100644
index 000000000..1b070e2a0
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
@@ -0,0 +1,251 @@
+package records
+
+import (
+ "encoding/binary"
+ "errors"
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+var (
+ ErrInvalidKeyLength = errors.New("invalid key length")
+ ErrInvalidValueLength = errors.New("invalid value length")
+ ErrInvalidPrefix = errors.New("invalid prefix")
+)
+
+func GraveyardRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 64 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ if len(value) != 64 {
+ return nil, nil, ErrInvalidValueLength
+ }
+ var (
+ cnr cid.ID
+ obj oid.ID
+ r GraveyardRecord
+ )
+
+ _ = cnr.Decode(key[:32])
+ _ = obj.Decode(key[32:])
+
+ r.object.SetContainer(cnr)
+ r.object.SetObject(obj)
+
+ _ = cnr.Decode(value[:32])
+ _ = obj.Decode(value[32:])
+
+ r.tombstone.SetContainer(cnr)
+ r.tombstone.SetObject(obj)
+
+ return &r, nil, nil
+}
+
+func GarbageRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 64 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var (
+ cnr cid.ID
+ obj oid.ID
+ r GarbageRecord
+ )
+
+ _ = cnr.Decode(key[:32])
+ _ = obj.Decode(key[32:])
+
+ r.addr.SetContainer(cnr)
+ r.addr.SetObject(obj)
+
+ return &r, nil, nil
+}
+
+func ContainerVolumeRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 32 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ if len(value) != 8 {
+ return nil, nil, ErrInvalidValueLength
+ }
+ var r ContainerVolumeRecord
+
+ _ = r.id.Decode(key)
+ r.volume = binary.LittleEndian.Uint64(value)
+
+ return &r, nil, nil
+}
+
+func LockedRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+	var (
+		r   LockedRecord
+		err error
+	)
+
+	if err = r.id.Decode(key); err != nil {
+		return nil, nil, err
+	}
+	if r.ids, err = DecodeOIDs(value); err != nil {
+		return nil, nil, err
+	}
+	return &r, nil, nil
+}
+
+func ShardInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) == 0 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+
+ var r ShardInfoRecord
+ if string(key) == "id" {
+ r.label = string(key)
+ r.value = shard.ID(value).String()
+
+ return &r, nil, nil
+ }
+
+ if len(value) != 8 {
+ return nil, nil, ErrInvalidValueLength
+ }
+ r.label = string(key)
+ r.value = strconv.FormatUint(binary.LittleEndian.Uint64(value), 10)
+
+ return &r, nil, nil
+}
+
+func ObjectRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 32 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var r ObjectRecord
+
+ _ = r.id.Decode(key)
+ if err := r.object.Unmarshal(value); err != nil {
+ return nil, nil, err
+ }
+
+ return &r, nil, nil
+}
+
+func SmallRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var r SmallRecord
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ if len(value) != 0 {
+ x := string(value)
+ r.storageID = &x
+ }
+ return &r, nil, nil
+}
+
+func RootRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var r RootRecord
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ if len(value) == 0 {
+ return &r, nil, nil
+ }
+ r.info = &objectSDK.SplitInfo{}
+ if err := r.info.Unmarshal(value); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func OwnerRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
+ var r OwnerRecord
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func UserAttributeRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
+ var r UserAttributeRecord
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func PayloadHashRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 32 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var (
+ err error
+ r PayloadHashRecord
+ )
+
+ r.checksum.SetSHA256([32]byte(key))
+ if r.ids, err = DecodeOIDs(value); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func ParentRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var (
+ r ParentRecord
+ err error
+ )
+ if err = r.parent.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ if r.ids, err = DecodeOIDs(value); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func SplitRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var (
+ err error
+ r SplitRecord
+ )
+ if err = r.id.UnmarshalBinary(key); err != nil {
+ return nil, nil, err
+ }
+ if r.ids, err = DecodeOIDs(value); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func ContainerCountersRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(value) != 24 {
+ return nil, nil, ErrInvalidValueLength
+ }
+
+ var r ContainerCountersRecord
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+
+ r.logical = binary.LittleEndian.Uint64(value[:8])
+ r.physical = binary.LittleEndian.Uint64(value[8:16])
+ r.user = binary.LittleEndian.Uint64(value[16:24])
+
+ return &r, nil, nil
+}
+
+func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+	var (
+		r   ECInfoRecord
+		err error
+	)
+
+	if err = r.id.Decode(key); err != nil {
+		return nil, nil, err
+	}
+	if r.ids, err = DecodeOIDs(value); err != nil {
+		return nil, nil, err
+	}
+	return &r, nil, nil
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
new file mode 100644
index 000000000..a6c70d537
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
@@ -0,0 +1,135 @@
+package records
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+func (r *GraveyardRecord) String() string {
+ return fmt.Sprintf(
+ "Object CID %s OID %s %c Tombstone CID %s OID %s",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.object.Container()), tcell.ColorAqua),
+ common.FormatSimple(fmt.Sprintf("%-44s", r.object.Object()), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Container()), tcell.ColorAqua),
+ common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Object()), tcell.ColorAqua),
+ )
+}
+
+func (r *GarbageRecord) String() string {
+ return fmt.Sprintf(
+ "CID %-44s OID %-44s",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua),
+ common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua),
+ )
+}
+
+func (r *ContainerVolumeRecord) String() string {
+ return fmt.Sprintf(
+ "CID %-44s %c %d",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ r.volume,
+ )
+}
+
+func (r *LockedRecord) String() string {
+ return fmt.Sprintf(
+ "Locker OID %s %c Locked [%d]OID {...}",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ len(r.ids),
+ )
+}
+
+func (r *ShardInfoRecord) String() string {
+ return fmt.Sprintf("%-13s %c %s", r.label, tview.Borders.Vertical, r.value)
+}
+
+func (r *ObjectRecord) String() string {
+ return fmt.Sprintf(
+ "OID %s %c Object {...}",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ )
+}
+
+func (r *SmallRecord) String() string {
+ s := fmt.Sprintf(
+ "OID %s %c",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ )
+ if r.storageID != nil {
+ s = fmt.Sprintf("%s %s", s, *r.storageID)
+ }
+ return s
+}
+
+func (r *RootRecord) String() string {
+ s := fmt.Sprintf(
+ "Root OID %s %c",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ )
+ if r.info != nil {
+ s += " Split info {...}"
+ }
+ return s
+}
+
+func (r *OwnerRecord) String() string {
+ return "OID " + common.FormatSimple(r.id.String(), tcell.ColorAqua)
+}
+
+func (r *UserAttributeRecord) String() string {
+ return "OID " + common.FormatSimple(r.id.String(), tcell.ColorAqua)
+}
+
+func (r *PayloadHashRecord) String() string {
+ return fmt.Sprintf(
+ "Checksum %s %c [%d]OID {...}",
+ common.FormatSimple(r.checksum.String(), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ len(r.ids),
+ )
+}
+
+func (r *ParentRecord) String() string {
+ return fmt.Sprintf(
+ "Parent OID %s %c [%d]OID {...}",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.parent), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ len(r.ids),
+ )
+}
+
+func (r *SplitRecord) String() string {
+ return fmt.Sprintf(
+ "Split ID %s %c [%d]OID {...}",
+ common.FormatSimple(r.id.String(), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ len(r.ids),
+ )
+}
+
+func (r *ContainerCountersRecord) String() string {
+ return fmt.Sprintf(
+ "CID %s %c logical %d, physical %d, user %d",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ r.logical, r.physical, r.user,
+ )
+}
+
+func (r *ECInfoRecord) String() string {
+ return fmt.Sprintf(
+ "OID %s %c [%d]OID {...}",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ len(r.ids),
+ )
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/types.go b/cmd/frostfs-lens/internal/schema/metabase/records/types.go
new file mode 100644
index 000000000..34c1c29fd
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/types.go
@@ -0,0 +1,82 @@
+package records
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/google/uuid"
+)
+
+// Typed views of metabase entries. Descriptions below derive from field
+// names and the String() renderers; consult the metabase schema docs for
+// authoritative semantics.
+type (
+	// GraveyardRecord links an object address with a tombstone address.
+	GraveyardRecord struct {
+		object, tombstone oid.Address
+	}
+
+	// GarbageRecord is a single object address marked as garbage.
+	GarbageRecord struct {
+		addr oid.Address
+	}
+
+	// ContainerVolumeRecord is a container ID with a volume counter.
+	ContainerVolumeRecord struct {
+		id     cid.ID
+		volume uint64
+	}
+
+	// LockedRecord is an object ID with a list of associated object IDs
+	// (presumably the objects it locks — confirm against metabase schema).
+	LockedRecord struct {
+		id  oid.ID
+		ids []oid.ID
+	}
+
+	// ShardInfoRecord is a label/value pair of shard metadata.
+	ShardInfoRecord struct {
+		label string
+		value string
+	}
+
+	// ObjectRecord is an object keyed by its ID.
+	ObjectRecord struct {
+		id     oid.ID
+		object objectSDK.Object
+	}
+
+	// SmallRecord maps an object ID to an optional storage ID string.
+	SmallRecord struct {
+		id        oid.ID
+		storageID *string // optional
+	}
+
+	// RootRecord is a root object ID with optional split info.
+	RootRecord struct {
+		id   oid.ID
+		info *objectSDK.SplitInfo // optional
+	}
+
+	// OwnerRecord is a single object ID entry (the owner key is carried
+	// by the enclosing bucket).
+	OwnerRecord struct {
+		id oid.ID
+	}
+
+	// UserAttributeRecord is a single object ID entry (the attribute is
+	// carried by the enclosing bucket).
+	UserAttributeRecord struct {
+		id oid.ID
+	}
+
+	// PayloadHashRecord maps a payload checksum to object IDs.
+	PayloadHashRecord struct {
+		checksum checksum.Checksum
+		ids      []oid.ID
+	}
+
+	// ParentRecord maps a parent object ID to related object IDs.
+	ParentRecord struct {
+		parent oid.ID
+		ids    []oid.ID
+	}
+
+	// SplitRecord maps a split ID (UUID) to object IDs.
+	SplitRecord struct {
+		id  uuid.UUID
+		ids []oid.ID
+	}
+
+	// ContainerCountersRecord holds per-container object counters.
+	ContainerCountersRecord struct {
+		id                      cid.ID
+		logical, physical, user uint64
+	}
+
+	// ECInfoRecord maps an object ID to related (EC chunk) object IDs.
+	ECInfoRecord struct {
+		id  oid.ID
+		ids []oid.ID
+	}
+)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/util.go b/cmd/frostfs-lens/internal/schema/metabase/records/util.go
new file mode 100644
index 000000000..f50ebe951
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/util.go
@@ -0,0 +1,20 @@
+package records
+
+import (
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nspcc-dev/neo-go/pkg/io"
+)
+
+// DecodeOIDs decodes a var-uint-prefixed list of object IDs from data.
+// It returns an error if the buffer is truncated or any ID is malformed.
+func DecodeOIDs(data []byte) ([]oid.ID, error) {
+	r := io.NewBinReaderFromBuf(data)
+
+	size := r.ReadVarUint()
+	// BinReader reports failures through its Err field; without this check
+	// a malformed prefix yields a garbage size and a silent bad result.
+	if r.Err != nil {
+		return nil, r.Err
+	}
+	oids := make([]oid.ID, size)
+
+	for i := uint64(0); i < size; i++ {
+		if err := oids[i].Decode(r.ReadVarBytes()); err != nil {
+			return nil, err
+		}
+	}
+	return oids, nil
+}
diff --git a/go.mod b/go.mod
index 19bf7852f..be3c6e74d 100644
--- a/go.mod
+++ b/go.mod
@@ -17,7 +17,9 @@ require (
github.com/VictoriaMetrics/easyproto v0.1.4
github.com/cheggaaa/pb v1.0.29
github.com/chzyer/readline v1.5.1
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
+ github.com/gdamore/tcell/v2 v2.7.4
github.com/go-pkgz/expirable-cache/v3 v3.0.0
github.com/google/uuid v1.6.0
github.com/hashicorp/golang-lru/v2 v2.0.7
@@ -65,10 +67,10 @@ require (
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
- github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/davidmz/go-pageant v1.0.2 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/gdamore/encoding v1.0.0 // indirect
github.com/go-fed/httpsig v1.1.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -85,6 +87,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
github.com/klauspost/reedsolomon v1.12.1 // indirect
+ github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
diff --git a/go.sum b/go.sum
index 8ebd59157..d0218a348 100644
--- a/go.sum
+++ b/go.sum
@@ -75,6 +75,10 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
+github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
+github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU=
+github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg=
github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -142,6 +146,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
+github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -218,6 +224,7 @@ github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5E
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
@@ -352,6 +359,7 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -359,6 +367,7 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
From 9cbd32bce8524e32711287f86196382517f37562 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Fri, 16 Aug 2024 14:33:03 +0300
Subject: [PATCH 073/705] [#1223] lens/tui: Add writecache schema
Signed-off-by: Aleksey Savchuk
---
.../internal/schema/writecache/parsers.go | 63 ++++++++++++++++++
.../internal/schema/writecache/types.go | 66 +++++++++++++++++++
2 files changed, 129 insertions(+)
create mode 100644 cmd/frostfs-lens/internal/schema/writecache/parsers.go
create mode 100644 cmd/frostfs-lens/internal/schema/writecache/types.go
diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go
new file mode 100644
index 000000000..7d70b27b2
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/writecache/parsers.go
@@ -0,0 +1,63 @@
+package writecache
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/mr-tron/base58"
+)
+
+// WritecacheParser parses the write-cache bbolt layout: the default bucket
+// parser is tried first, anything unrecognized falls back to a raw dump.
+var WritecacheParser = common.WithFallback(
+	DefaultBucketParser,
+	common.RawParser.ToFallbackParser(),
+)
+
+// DefaultBucketParser recognizes the write-cache default bucket, whose name
+// is a single zero byte. In bbolt, bucket entries carry a nil value.
+func DefaultBucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+	switch {
+	case value != nil:
+		return nil, nil, errors.New("not a bucket")
+	case !bytes.Equal(key, []byte{0}):
+		return nil, nil, errors.New("invalid key")
+	default:
+		return &DefaultBucket{}, DefaultRecordParser, nil
+	}
+}
+
+// DefaultRecordParser parses a write-cache record: the key is an address
+// string "<CID>/<OID>" (base58 parts), the value is the raw object data.
+func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+	parts := strings.Split(string(key), "/")
+
+	if len(parts) != 2 {
+		return nil, nil, errors.New("invalid key, expected address string <CID>/<OID>")
+	}
+
+	cnrRaw, err := base58.Decode(parts[0])
+	if err != nil {
+		return nil, nil, errors.New("can't decode CID string")
+	}
+	objRaw, err := base58.Decode(parts[1])
+	if err != nil {
+		return nil, nil, errors.New("can't decode OID string")
+	}
+
+	cnr := cid.ID{}
+	if err := cnr.Decode(cnrRaw); err != nil {
+		return nil, nil, fmt.Errorf("can't decode CID: %w", err)
+	}
+	obj := oid.ID{}
+	if err := obj.Decode(objRaw); err != nil {
+		return nil, nil, fmt.Errorf("can't decode OID: %w", err)
+	}
+
+	var r DefaultRecord
+
+	r.addr.SetContainer(cnr)
+	r.addr.SetObject(obj)
+
+	// value[:] only aliased the bbolt-owned buffer, which is valid solely
+	// inside the read transaction; take a real copy so the record data
+	// stays valid after the transaction ends.
+	r.data = bytes.Clone(value)
+
+	return &r, nil, nil
+}
diff --git a/cmd/frostfs-lens/internal/schema/writecache/types.go b/cmd/frostfs-lens/internal/schema/writecache/types.go
new file mode 100644
index 000000000..3f71c5366
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/writecache/types.go
@@ -0,0 +1,66 @@
+package writecache
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/davecgh/go-spew/spew"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+type (
+	// DefaultBucket is the single write-cache bucket (name: one zero byte).
+	DefaultBucket struct{}
+
+	// DefaultRecord is an object stored in the write cache, keyed by its
+	// "CID/OID" address string.
+	DefaultRecord struct {
+		addr oid.Address
+		data []byte
+	}
+)
+
+// String renders the bucket header shown in the tree view.
+func (b *DefaultBucket) String() string {
+	return common.FormatSimple("0 Default", tcell.ColorLime)
+}
+
+// String renders the record's address on one line; the payload is elided.
+func (r *DefaultRecord) String() string {
+	cnr := common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua)
+	obj := common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua)
+	return fmt.Sprintf("CID %s OID %s %c Data {...}", cnr, obj, tview.Borders.Vertical)
+}
+
+// DetailedString dumps the bucket for the detailed view.
+func (b *DefaultBucket) DetailedString() string {
+	return spew.Sdump(*b)
+}
+
+// DetailedString dumps the record, including its raw data, for the detailed view.
+func (r *DefaultRecord) DetailedString() string {
+	return spew.Sdump(*r)
+}
+
+// Filter reports Maybe for cid/oid filters (a match may exist among the
+// bucket's records) and No for any other filter tag.
+func (b *DefaultBucket) Filter(typ string, _ any) common.FilterResult {
+	switch typ {
+	case "cid", "oid":
+		return common.Maybe
+	default:
+		return common.No
+	}
+}
+
+// Filter matches the record's address against cid/oid filter values.
+// val is assumed to hold the type registered for the tag (cid.ID or
+// oid.ID); the type assertion panics otherwise.
+func (r *DefaultRecord) Filter(typ string, val any) common.FilterResult {
+	switch typ {
+	case "cid":
+		id := val.(cid.ID)
+		return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
+	case "oid":
+		id := val.(oid.ID)
+		return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
+	default:
+		return common.No
+	}
+}
From ed396448acbd3792da3ae0b756233e8c8fe67fee Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 11 Jul 2024 19:39:54 +0300
Subject: [PATCH 074/705] [#1223] lens/tui: Add TUI app to explore metabase
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-lens/internal/meta/root.go | 1 +
cmd/frostfs-lens/internal/meta/tui.go | 82 ++++
cmd/frostfs-lens/internal/tui/buckets.go | 257 ++++++++++
cmd/frostfs-lens/internal/tui/db.go | 160 +++++++
cmd/frostfs-lens/internal/tui/detailed.go | 24 +
cmd/frostfs-lens/internal/tui/filter.go | 44 ++
cmd/frostfs-lens/internal/tui/input.go | 77 +++
cmd/frostfs-lens/internal/tui/loading.go | 72 +++
cmd/frostfs-lens/internal/tui/records.go | 271 +++++++++++
cmd/frostfs-lens/internal/tui/types.go | 18 +
cmd/frostfs-lens/internal/tui/ui.go | 548 ++++++++++++++++++++++
cmd/frostfs-lens/internal/tui/util.go | 97 ++++
go.mod | 3 +-
go.sum | 6 +-
14 files changed, 1657 insertions(+), 3 deletions(-)
create mode 100644 cmd/frostfs-lens/internal/meta/tui.go
create mode 100644 cmd/frostfs-lens/internal/tui/buckets.go
create mode 100644 cmd/frostfs-lens/internal/tui/db.go
create mode 100644 cmd/frostfs-lens/internal/tui/detailed.go
create mode 100644 cmd/frostfs-lens/internal/tui/filter.go
create mode 100644 cmd/frostfs-lens/internal/tui/input.go
create mode 100644 cmd/frostfs-lens/internal/tui/loading.go
create mode 100644 cmd/frostfs-lens/internal/tui/records.go
create mode 100644 cmd/frostfs-lens/internal/tui/types.go
create mode 100644 cmd/frostfs-lens/internal/tui/ui.go
create mode 100644 cmd/frostfs-lens/internal/tui/util.go
diff --git a/cmd/frostfs-lens/internal/meta/root.go b/cmd/frostfs-lens/internal/meta/root.go
index 6741abd0c..351d1ce80 100644
--- a/cmd/frostfs-lens/internal/meta/root.go
+++ b/cmd/frostfs-lens/internal/meta/root.go
@@ -32,6 +32,7 @@ func init() {
inspectCMD,
listGraveyardCMD,
listGarbageCMD,
+ tuiCMD,
)
}
diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go
new file mode 100644
index 000000000..00e8bf117
--- /dev/null
+++ b/cmd/frostfs-lens/internal/meta/tui.go
@@ -0,0 +1,82 @@
+package meta
+
+import (
+ "context"
+ "fmt"
+
+ common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
+ schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
+ "github.com/rivo/tview"
+ "github.com/spf13/cobra"
+ "go.etcd.io/bbolt"
+)
+
+// tuiCMD launches the interactive metabase explorer.
+var tuiCMD = &cobra.Command{
+	Use:   "explore",
+	Short: "Metabase exploration with a terminal UI",
+	Long: `Launch a terminal UI to explore metabase and search for data.
+
+Available search filters:
+- cid CID
+- oid OID
+- addr CID/OID
+- attr key[/value]
+`,
+	Run: tuiFunc,
+}
+
+// initialPrompt holds the --filter flag value: a filter expression the UI
+// starts with.
+var initialPrompt string
+
+// init registers the metabase path flag and the --filter flag on the
+// explore command.
+func init() {
+	common.AddComponentPathFlag(tuiCMD, &vPath)
+
+	tuiCMD.Flags().StringVar(
+		&initialPrompt,
+		"filter",
+		"",
+		"Filter prompt to start with, format 'tag:value [+ tag:value]...'",
+	)
+}
+
+// tuiFunc is the cobra entry point; it terminates the command on error.
+func tuiFunc(cmd *cobra.Command, _ []string) {
+	err := runTUI(cmd)
+	common.ExitOnErr(cmd, err)
+}
+
+// runTUI opens the metabase read-only, wires the metabase schema parser and
+// the search filters into a new UI, and runs the tview event loop until the
+// user quits. The returned error is either a startup failure or the event
+// loop's exit error.
+func runTUI(cmd *cobra.Command) error {
+	db, err := openDB(false)
+	if err != nil {
+		return fmt.Errorf("couldn't open database: %w", err)
+	}
+	defer db.Close()
+
+	// Need if app was stopped with Ctrl-C.
+	ctx, cancel := context.WithCancel(cmd.Context())
+	defer cancel()
+
+	app := tview.NewApplication()
+	ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
+
+	// Registration errors are discarded: the tags are hard-coded and
+	// distinct, so registration presumably cannot fail here — confirm
+	// against AddFilter's failure modes.
+	_ = ui.AddFilter("cid", tui.CIDParser, "CID")
+	_ = ui.AddFilter("oid", tui.OIDParser, "OID")
+	_ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID")
+	_ = ui.AddCompositeFilter("attr", tui.AttributeParser, "key[/value]")
+
+	err = ui.WithPrompt(initialPrompt)
+	if err != nil {
+		return fmt.Errorf("invalid filter prompt: %w", err)
+	}
+
+	app.SetRoot(ui, true).SetFocus(ui)
+	return app.Run()
+}
+
+// openDB opens the metabase file at vPath; writable toggles read-only mode.
+func openDB(writable bool) (*bbolt.DB, error) {
+	opts := &bbolt.Options{ReadOnly: !writable}
+	return bbolt.Open(vPath, 0o600, opts)
+}
diff --git a/cmd/frostfs-lens/internal/tui/buckets.go b/cmd/frostfs-lens/internal/tui/buckets.go
new file mode 100644
index 000000000..3f5088e7a
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/buckets.go
@@ -0,0 +1,257 @@
+package tui
+
+import (
+ "context"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+// BucketsView is the tree-view page listing database buckets. Children are
+// loaded lazily: InputHandler marks a node in nodeToUpdate and the next
+// Update call loads it.
+type BucketsView struct {
+	*tview.Box
+
+	mu sync.Mutex
+
+	view         *tview.TreeView
+	nodeToUpdate *tview.TreeNode
+
+	ui     *UI
+	filter *Filter
+}
+
+// bucketNode is the per-tree-node payload: the bucket it represents and the
+// filter state accumulated along the path from the root.
+type bucketNode struct {
+	bucket *Bucket
+	filter *Filter
+}
+
+// NewBucketsView returns an unmounted bucket tree view bound to ui.
+func NewBucketsView(ui *UI, filter *Filter) *BucketsView {
+	v := &BucketsView{
+		Box:    tview.NewBox(),
+		view:   tview.NewTreeView(),
+		ui:     ui,
+		filter: filter,
+	}
+	return v
+}
+
+// Mount builds the root node (representing the whole database) and marks it
+// for loading on the first Update.
+func (v *BucketsView) Mount(_ context.Context) error {
+	rootNode := tview.NewTreeNode(".")
+	rootNode.SetExpanded(false)
+	rootNode.SetSelectable(false)
+	rootNode.SetReference(&bucketNode{
+		bucket: &Bucket{NextParser: v.ui.rootParser},
+		filter: v.filter,
+	})
+
+	v.view.SetRoot(rootNode)
+	v.view.SetCurrentNode(rootNode)
+	v.nodeToUpdate = rootNode
+
+	return nil
+}
+
+// Update lazily loads the node marked by InputHandler: a leaf bucket opens
+// its records page, otherwise the node's children are (re)loaded and the
+// node is toggled. The load runs in a goroutine so ctx can cancel it.
+func (v *BucketsView) Update(ctx context.Context) error {
+	if v.nodeToUpdate == nil {
+		return nil
+	}
+	defer func() { v.nodeToUpdate = nil }()
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	ready := make(chan struct{})
+	// Buffered so the worker can report an error and exit even after Update
+	// has already returned (unbuffered sends would leak the goroutine).
+	errCh := make(chan error, 1)
+
+	tmp := tview.NewTreeNode(v.nodeToUpdate.GetText())
+	tmp.SetReference(v.nodeToUpdate.GetReference())
+
+	node := v.nodeToUpdate.GetReference().(*bucketNode)
+
+	go func() {
+		defer close(ready)
+
+		hasBuckets, err := HasBuckets(ctx, v.ui.db, node.bucket.Path)
+		if err != nil {
+			// Stop here: continuing with an undefined hasBuckets would
+			// act on garbage state.
+			errCh <- err
+			return
+		}
+
+		// Show the selected bucket's records instead.
+		if !hasBuckets && node.bucket.NextParser != nil {
+			v.ui.moveNextPage(NewRecordsView(v.ui, node.bucket, node.filter))
+		}
+
+		if v.nodeToUpdate.IsExpanded() {
+			return
+		}
+
+		if err := v.loadNodeChildren(ctx, tmp, node.filter); err != nil {
+			errCh <- err
+		}
+	}()
+
+	select {
+	case <-ctx.Done():
+	case err := <-errCh:
+		return err
+	case <-ready:
+		// Prefer a pending error over applying partial results.
+		select {
+		case err := <-errCh:
+			return err
+		default:
+		}
+		v.mu.Lock()
+		v.nodeToUpdate.SetChildren(tmp.GetChildren())
+		v.nodeToUpdate.SetExpanded(!v.nodeToUpdate.IsExpanded())
+		v.mu.Unlock()
+	}
+
+	return nil
+}
+
+// Unmount is a no-op: the view holds no background resources of its own.
+func (v *BucketsView) Unmount() {
+}
+
+// Draw delegates rendering to the embedded tree view, sized to the inner rect.
+func (v *BucketsView) Draw(screen tcell.Screen) {
+	x, y, w, h := v.GetInnerRect()
+	v.view.SetRect(x, y, w, h)
+	v.view.Draw(screen)
+}
+
+// loadNodeChildren streams the child buckets of node's bucket from the
+// database, parses each name with the bucket's schema parser, drops children
+// that cannot satisfy the filter, and appends the rest as collapsed tree
+// nodes carrying the filter state narrowed by their own entry.
+func (v *BucketsView) loadNodeChildren(
+	ctx context.Context, node *tview.TreeNode, filter *Filter,
+) error {
+	parentBucket := node.GetReference().(*bucketNode).bucket
+
+	path := parentBucket.Path
+	parser := parentBucket.NextParser
+
+	buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
+	if err != nil {
+		return err
+	}
+
+	for item := range buffer {
+		// Errors are delivered in-band through the channel.
+		if item.err != nil {
+			return item.err
+		}
+		bucket := item.val
+
+		bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
+		if err != nil {
+			return err
+		}
+
+		satisfies, err := v.bucketSatisfiesFilter(ctx, bucket, filter)
+		if err != nil {
+			return err
+		}
+		if !satisfies {
+			continue
+		}
+
+		child := tview.NewTreeNode(bucket.Entry.String()).
+			SetSelectable(true).
+			SetExpanded(false).
+			SetReference(&bucketNode{
+				bucket: bucket,
+				filter: filter.Apply(bucket.Entry),
+			})
+
+		node.AddChild(child)
+	}
+
+	return nil
+}
+
+// bucketSatisfiesFilter reports whether the bucket, or anything reachable
+// inside it (nested buckets or records, recursively), matches the filter.
+// It may scan the database and stops early on the first match.
+func (v *BucketsView) bucketSatisfiesFilter(
+	ctx context.Context, bucket *Bucket, filter *Filter,
+) (bool, error) {
+	// Does the current bucket itself satisfy the filter?
+	filter = filter.Apply(bucket.Entry)
+
+	if filter.Result() == common.Yes {
+		return true, nil
+	}
+
+	if filter.Result() == common.No {
+		return false, nil
+	}
+
+	// Result is Maybe: the answer depends on the bucket's contents.
+	// Cancel nested loads as soon as we return.
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Check the current bucket's nested buckets if exist
+	bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+	if err != nil {
+		return false, err
+	}
+
+	for item := range bucketsBuffer {
+		if item.err != nil {
+			return false, item.err
+		}
+		b := item.val
+
+		b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
+		if err != nil {
+			return false, err
+		}
+
+		satisfies, err := v.bucketSatisfiesFilter(ctx, b, filter)
+		if err != nil {
+			return false, err
+		}
+		if satisfies {
+			return true, nil
+		}
+	}
+
+	// Check the current bucket's nested records if exist
+	recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+	if err != nil {
+		return false, err
+	}
+
+	for item := range recordsBuffer {
+		if item.err != nil {
+			return false, item.err
+		}
+		r := item.val
+
+		r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
+		if err != nil {
+			return false, err
+		}
+
+		if filter.Apply(r.Entry).Result() == common.Yes {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+// InputHandler routes key events: Enter marks the selected node for the next
+// Update (expand/collapse or open records), Ctrl-R opens the records page,
+// Ctrl-D opens the detailed dump; everything else is tree navigation.
+func (v *BucketsView) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
+	return v.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
+		currentNode := v.view.GetCurrentNode()
+		if currentNode == nil {
+			return
+		}
+
+		switch event.Key() {
+		case tcell.KeyEnter:
+			// Expand or collapse the selected bucket's nested buckets,
+			// otherwise, navigate to that bucket's records.
+			v.nodeToUpdate = currentNode
+		case tcell.KeyCtrlR:
+			// Navigate to the selected bucket's records.
+			bucketNode := currentNode.GetReference().(*bucketNode)
+			v.ui.moveNextPage(NewRecordsView(v.ui, bucketNode.bucket, bucketNode.filter))
+		case tcell.KeyCtrlD:
+			// Navigate to the selected bucket's detailed view.
+			bucketNode := currentNode.GetReference().(*bucketNode)
+			v.ui.moveNextPage(NewDetailedView(bucketNode.bucket.Entry.DetailedString()))
+		default:
+			v.view.InputHandler()(event, func(tview.Primitive) {})
+		}
+	})
+}
diff --git a/cmd/frostfs-lens/internal/tui/db.go b/cmd/frostfs-lens/internal/tui/db.go
new file mode 100644
index 000000000..d0cf611d4
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/db.go
@@ -0,0 +1,160 @@
+package tui
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "go.etcd.io/bbolt"
+)
+
+// Item carries either a loaded value or the error that ended iteration;
+// exactly one of the fields is meaningful.
+type Item[T any] struct {
+	val T
+	err error
+}
+
+func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
+ if len(path) == 0 {
+ return nil, errors.New("can't find bucket without path")
+ }
+
+ name := path[0]
+ bucket := tx.Bucket(name)
+ if bucket == nil {
+ return nil, fmt.Errorf("no bucket with name %s", name)
+ }
+ for _, name := range path[1:] {
+ bucket = bucket.Bucket(name)
+ if bucket == nil {
+ return nil, fmt.Errorf("no bucket with name %s", name)
+ }
+ }
+ return bucket, nil
+}
+
+// load iterates the bucket at path (or the DB root when path is empty) in a
+// background goroutine and streams transform(key, value) results through the
+// returned buffered channel. Pairs rejected by filter are skipped. Errors
+// are delivered in-band via Item.err; the channel is closed when iteration
+// finishes or ctx is canceled.
+func load[T any](
+	ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
+	filter func(key, value []byte) bool, transform func(key, value []byte) T,
+) (<-chan Item[T], error) {
+	buffer := make(chan Item[T], bufferSize)
+
+	go func() {
+		defer close(buffer)
+
+		err := db.View(func(tx *bbolt.Tx) error {
+			var cursor *bbolt.Cursor
+			if len(path) == 0 {
+				cursor = tx.Cursor()
+			} else {
+				bucket, err := resolvePath(tx, path)
+				if err != nil {
+					buffer <- Item[T]{err: fmt.Errorf("can't find bucket: %w", err)}
+					return nil
+				}
+				cursor = bucket.Cursor()
+			}
+
+			key, value := cursor.First()
+			for {
+				// nil key marks the end of the cursor.
+				if key == nil {
+					return nil
+				}
+				if filter != nil && !filter(key, value) {
+					key, value = cursor.Next()
+					continue
+				}
+
+				// The blocking send races against ctx so the goroutine
+				// terminates even when the consumer stops reading.
+				select {
+				case <-ctx.Done():
+					return nil
+				case buffer <- Item[T]{val: transform(key, value)}:
+					key, value = cursor.Next()
+				}
+			}
+		})
+		if err != nil {
+			buffer <- Item[T]{err: err}
+		}
+	}()
+
+	return buffer, nil
+}
+
+// LoadBuckets streams the child buckets of the bucket at path. In bbolt,
+// bucket entries have a nil value, which is what the filter selects.
+func LoadBuckets(
+	ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
+) (<-chan Item[*Bucket], error) {
+	buffer, err := load(
+		ctx, db, path, bufferSize,
+		func(_, value []byte) bool {
+			return value == nil
+		},
+		func(key, _ []byte) *Bucket {
+			// Build each child path on a fresh backing array so sibling
+			// paths don't alias each other through append.
+			base := make([][]byte, 0, len(path))
+			base = append(base, path...)
+
+			return &Bucket{
+				Name: key,
+				Path: append(base, key),
+			}
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("can't start iterating bucket: %w", err)
+	}
+
+	return buffer, nil
+}
+
+// LoadRecords streams the key/value records of the bucket at path. In bbolt,
+// record entries have a non-nil value, which is what the filter selects.
+func LoadRecords(
+	ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
+) (<-chan Item[*Record], error) {
+	buffer, err := load(
+		ctx, db, path, bufferSize,
+		func(_, value []byte) bool {
+			return value != nil
+		},
+		func(key, value []byte) *Record {
+			// Build each record path on a fresh backing array so sibling
+			// paths don't alias each other through append.
+			base := make([][]byte, 0, len(path))
+			base = append(base, path...)
+
+			return &Record{
+				Key:   key,
+				Value: value,
+				Path:  append(base, key),
+			}
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("can't start iterating bucket: %w", err)
+	}
+
+	return buffer, nil
+}
+
+// HasBuckets checks if a bucket has nested buckets. It relies on assumption
+// that a bucket can have either nested buckets or records but not both.
+// It inspects only the first entry: a nil value means a nested bucket.
+func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error) {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	buffer, err := load(
+		ctx, db, path, 1,
+		nil,
+		func(_, value []byte) []byte { return value },
+	)
+	if err != nil {
+		return false, err
+	}
+
+	x, ok := <-buffer
+	if !ok {
+		// Empty bucket: no nested buckets (and no records).
+		return false, nil
+	}
+	// The original returned the outer err (always nil at this point),
+	// silently swallowing iteration errors; propagate the item's error.
+	if x.err != nil {
+		return false, x.err
+	}
+	if x.val != nil {
+		// First entry is a record, hence no nested buckets.
+		return false, nil
+	}
+	return true, nil
+}
diff --git a/cmd/frostfs-lens/internal/tui/detailed.go b/cmd/frostfs-lens/internal/tui/detailed.go
new file mode 100644
index 000000000..b2d897230
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/detailed.go
@@ -0,0 +1,24 @@
+package tui
+
+import (
+ "context"
+
+ "github.com/rivo/tview"
+)
+
+// DetailedView is a read-only page showing a pre-rendered detailed dump of
+// a bucket or record.
+type DetailedView struct {
+	*tview.TextView
+}
+
+// NewDetailedView wraps the given dump in a color-aware text view.
+func NewDetailedView(detailed string) *DetailedView {
+	view := tview.NewTextView()
+	view.SetDynamicColors(true)
+	view.SetText(detailed)
+	return &DetailedView{TextView: view}
+}
+
+// The page lifecycle is trivial: the text is static, nothing to set up,
+// refresh, or tear down.
+func (v *DetailedView) Mount(_ context.Context) error  { return nil }
+func (v *DetailedView) Update(_ context.Context) error { return nil }
+func (v *DetailedView) Unmount()                       {}
diff --git a/cmd/frostfs-lens/internal/tui/filter.go b/cmd/frostfs-lens/internal/tui/filter.go
new file mode 100644
index 000000000..e7879eca7
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/filter.go
@@ -0,0 +1,44 @@
+package tui
+
+import (
+ "maps"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+)
+
+// Filter is a conjunction of tag->value conditions; results tracks, per tag,
+// the best match observed so far (No < Maybe < Yes).
+type Filter struct {
+	values  map[string]any
+	results map[string]common.FilterResult
+}
+
+// NewFilter builds a filter over the given tag->value pairs; every tag
+// starts in the No state until Apply observes a match.
+func NewFilter(values map[string]any) *Filter {
+	results := make(map[string]common.FilterResult, len(values))
+	for tag := range values {
+		results[tag] = common.No
+	}
+	return &Filter{
+		values:  maps.Clone(values),
+		results: results,
+	}
+}
+
+// Apply returns a new filter whose per-tag results are upgraded by matching
+// e against every tag's value. The receiver is left unmodified: results is
+// cloned, while the immutable values map is shared.
+func (f *Filter) Apply(e common.SchemaEntry) *Filter {
+	filter := &Filter{
+		values:  f.values,
+		results: maps.Clone(f.results),
+	}
+
+	for tag, value := range filter.values {
+		// max keeps the strongest result seen along the path (No < Maybe < Yes).
+		filter.results[tag] = max(filter.results[tag], e.Filter(tag, value))
+	}
+
+	return filter
+}
+
+// Result combines all per-tag results conjunctively: the overall result is
+// the weakest one (Yes only when every tag matched).
+func (f *Filter) Result() common.FilterResult {
+	res := common.Yes
+	for _, r := range f.results {
+		if r < res {
+			res = r
+		}
+	}
+	return res
+}
diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go
new file mode 100644
index 000000000..4fdf97119
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/input.go
@@ -0,0 +1,77 @@
+package tui
+
+import (
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+// InputFieldWithHistory is an input field with a bounded prompt history
+// scrollable via Up/Down. historyPointer == len(history) means "not
+// scrolling"; currentContent preserves the in-progress text while scrolling.
+type InputFieldWithHistory struct {
+	*tview.InputField
+	history        []string
+	historyLimit   int
+	historyPointer int
+	currentContent string
+}
+
+// NewInputFieldWithHistory wraps a tview input field with a history capped
+// at historyLimit entries.
+func NewInputFieldWithHistory(historyLimit int) *InputFieldWithHistory {
+	f := &InputFieldWithHistory{
+		InputField:   tview.NewInputField(),
+		historyLimit: historyLimit,
+	}
+	return f
+}
+
+// AddToHistory appends s as the most recent history entry, evicting the
+// oldest entry at capacity and de-duplicating when s was picked from the
+// history itself.
+func (f *InputFieldWithHistory) AddToHistory(s string) {
+	// Stop scrolling history on history change, need to start scrolling again.
+	defer func() { f.historyPointer = len(f.history) }()
+
+	// Used history data for search prompt, so just make that data recent:
+	// drop the old occurrence here and let the final append re-add it.
+	// (The original also appended here, storing the entry twice.)
+	if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
+		f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
+	}
+
+	// Evict the oldest entry when at capacity.
+	if len(f.history) == f.historyLimit {
+		f.history = f.history[1:]
+	}
+	f.history = append(f.history, s)
+}
+
+// InputHandler scrolls the history with Up (older) and Down (newer). The
+// text being typed is stashed in currentContent when scrolling starts and
+// restored when scrolling past the newest entry. Other keys go to the
+// underlying input field.
+func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
+	return f.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
+		switch event.Key() {
+		case tcell.KeyDown:
+			if len(f.history) == 0 {
+				return
+			}
+			// Need to start iterating before.
+			if f.historyPointer == len(f.history) {
+				return
+			}
+			// Iterate to most recent prompts.
+			f.historyPointer++
+			// Stop iterating over history.
+			if f.historyPointer == len(f.history) {
+				f.InputField.SetText(f.currentContent)
+				return
+			}
+			f.InputField.SetText(f.history[f.historyPointer])
+		case tcell.KeyUp:
+			if len(f.history) == 0 {
+				return
+			}
+			// Start iterating over history.
+			if f.historyPointer == len(f.history) {
+				f.currentContent = f.InputField.GetText()
+			}
+			// End of history.
+			if f.historyPointer == 0 {
+				return
+			}
+			// Iterate to least recent prompts.
+			f.historyPointer--
+			f.InputField.SetText(f.history[f.historyPointer])
+		default:
+			f.InputField.InputHandler()(event, func(tview.Primitive) {})
+		}
+	})
+}
diff --git a/cmd/frostfs-lens/internal/tui/loading.go b/cmd/frostfs-lens/internal/tui/loading.go
new file mode 100644
index 000000000..4b9384ad4
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/loading.go
@@ -0,0 +1,72 @@
+package tui
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+// LoadingBar is a one-line progress indicator showing elapsed time while a
+// background load runs. reset cancels the ticker goroutine started by Start.
+type LoadingBar struct {
+	*tview.Box
+	view           *tview.TextView
+	secondsElapsed atomic.Int64
+	needDrawFunc   func()
+	reset          func()
+}
+
+// NewLoadingBar returns a bar that calls needDrawFunc whenever its display
+// should be refreshed.
+func NewLoadingBar(needDrawFunc func()) *LoadingBar {
+	bar := &LoadingBar{
+		Box:          tview.NewBox(),
+		view:         tview.NewTextView(),
+		needDrawFunc: needDrawFunc,
+	}
+	// Inverted colors make the bar stand out against the background.
+	bar.view.SetBackgroundColor(tview.Styles.PrimaryTextColor)
+	bar.view.SetTextColor(bar.GetBackgroundColor())
+
+	return bar
+}
+
+// Start launches a goroutine that bumps the elapsed-seconds counter once per
+// second and requests a redraw; the goroutine exits when ctx is canceled or
+// Stop is called.
+func (b *LoadingBar) Start(ctx context.Context) {
+	ctx, b.reset = context.WithCancel(ctx)
+
+	go func() {
+		ticker := time.NewTicker(1 * time.Second)
+		defer ticker.Stop()
+
+		b.secondsElapsed.Store(0)
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-ticker.C:
+				b.secondsElapsed.Add(1)
+				b.needDrawFunc()
+			}
+		}
+	}()
+}
+
+// Stop cancels the ticker goroutine started by Start. Stop without a prior
+// Start, or a repeated Stop, is a no-op instead of a nil-dereference panic.
+func (b *LoadingBar) Stop() {
+	if b.reset != nil {
+		b.reset()
+		b.reset = nil
+	}
+}
+
+// Draw renders the elapsed time as "Ns" below one minute and "NmMs" beyond,
+// on a single line at the top of the inner rect.
+func (b *LoadingBar) Draw(screen tcell.Screen) {
+	seconds := b.secondsElapsed.Load()
+
+	// Named "elapsed" rather than "time" to avoid shadowing the time package.
+	var elapsed string
+	if seconds < 60 {
+		elapsed = fmt.Sprintf("%ds", seconds)
+	} else {
+		elapsed = fmt.Sprintf("%dm%ds", seconds/60, seconds%60)
+	}
+	b.view.SetText(fmt.Sprintf(" Loading... %s (press Escape to cancel) ", elapsed))
+
+	x, y, width, _ := b.GetInnerRect()
+	b.view.SetRect(x, y, width, 1)
+	b.view.Draw(screen)
+}
diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go
new file mode 100644
index 000000000..5f53ed287
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/records.go
@@ -0,0 +1,271 @@
+package tui
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+type updateType int
+
+const (
+ other updateType = iota
+ moveToPrevPage
+ moveToNextPage
+ moveUp
+ moveDown
+ moveHome
+ moveEnd
+)
+
+type RecordsView struct {
+ *tview.Box
+
+ mu sync.RWMutex
+
+ onUnmount func()
+
+ bucket *Bucket
+ records []*Record
+
+ buffer chan *Record
+
+ firstRecordIndex int
+ lastRecordIndex int
+ selectedRecordIndex int
+
+ updateType updateType
+
+ ui *UI
+ filter *Filter
+}
+
+func NewRecordsView(ui *UI, bucket *Bucket, filter *Filter) *RecordsView {
+ return &RecordsView{
+ Box: tview.NewBox(),
+ bucket: bucket,
+ ui: ui,
+ filter: filter,
+ }
+}
+
+func (v *RecordsView) Mount(ctx context.Context) error {
+ if v.onUnmount != nil {
+ return errors.New("try to mount already mounted component")
+ }
+
+ ctx, v.onUnmount = context.WithCancel(ctx)
+
+ tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
+ if err != nil {
+ return err
+ }
+
+ v.buffer = make(chan *Record, v.ui.loadBufferSize)
+ go func() {
+ defer close(v.buffer)
+
+ for item := range tempBuffer {
+ if item.err != nil {
+ v.ui.stopOnError(err)
+ break
+ }
+ record := item.val
+
+ record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
+ if err != nil {
+ v.ui.stopOnError(err)
+ break
+ }
+
+ if v.filter.Apply(record.Entry).Result() != common.Yes {
+ continue
+ }
+
+ v.buffer <- record
+ }
+ }()
+
+ return nil
+}
+
+func (v *RecordsView) Unmount() {
+ if v.onUnmount == nil {
+ panic("try to unmount not mounted component")
+ }
+ v.onUnmount()
+ v.onUnmount = nil
+}
+
+func (v *RecordsView) Update(ctx context.Context) error {
+ _, _, _, recordsPerPage := v.GetInnerRect()
+ firstRecordIndex, lastRecordIndex, selectedRecordIndex := v.getNewIndexes()
+
+loop:
+ for len(v.records) < lastRecordIndex {
+ select {
+ case <-ctx.Done():
+ return nil
+ case record, ok := <-v.buffer:
+ if !ok {
+ break loop
+ }
+ v.records = append(v.records, record)
+ }
+ }
+
+ // Set the update type to its default value after some specific key event
+ // has been handled.
+ v.updateType = other
+
+ firstRecordIndex = max(0, min(firstRecordIndex, len(v.records)-recordsPerPage))
+ lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records))
+ selectedRecordIndex = min(selectedRecordIndex, lastRecordIndex-1)
+
+ v.mu.Lock()
+ v.firstRecordIndex = firstRecordIndex
+ v.lastRecordIndex = lastRecordIndex
+ v.selectedRecordIndex = selectedRecordIndex
+ v.mu.Unlock()
+
+ return nil
+}
+
+func (v *RecordsView) getNewIndexes() (int, int, int) {
+ v.mu.RLock()
+ firstRecordIndex := v.firstRecordIndex
+ lastRecordIndex := v.lastRecordIndex
+ selectedRecordIndex := v.selectedRecordIndex
+ v.mu.RUnlock()
+
+ _, _, _, recordsPerPage := v.GetInnerRect()
+
+ switch v.updateType {
+ case moveUp:
+ if selectedRecordIndex != firstRecordIndex {
+ selectedRecordIndex--
+ break
+ }
+ firstRecordIndex = max(0, firstRecordIndex-1)
+ lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records))
+ selectedRecordIndex = firstRecordIndex
+ case moveToPrevPage:
+ if selectedRecordIndex != firstRecordIndex {
+ selectedRecordIndex = firstRecordIndex
+ break
+ }
+ firstRecordIndex = max(0, firstRecordIndex-recordsPerPage)
+ lastRecordIndex = firstRecordIndex + recordsPerPage
+ selectedRecordIndex = firstRecordIndex
+ case moveDown:
+ if selectedRecordIndex != lastRecordIndex-1 {
+ selectedRecordIndex++
+ break
+ }
+ firstRecordIndex++
+ lastRecordIndex++
+ selectedRecordIndex++
+ case moveToNextPage:
+ if selectedRecordIndex != lastRecordIndex-1 {
+ selectedRecordIndex = lastRecordIndex - 1
+ break
+ }
+ firstRecordIndex += recordsPerPage
+ lastRecordIndex = firstRecordIndex + recordsPerPage
+ selectedRecordIndex = lastRecordIndex - 1
+ case moveHome:
+ firstRecordIndex = 0
+ lastRecordIndex = firstRecordIndex + recordsPerPage
+ selectedRecordIndex = 0
+ case moveEnd:
+ lastRecordIndex = math.MaxInt32
+ firstRecordIndex = lastRecordIndex - recordsPerPage
+ selectedRecordIndex = lastRecordIndex - 1
+ default:
+ lastRecordIndex = firstRecordIndex + recordsPerPage
+ }
+
+ return firstRecordIndex, lastRecordIndex, selectedRecordIndex
+}
+
+func (v *RecordsView) GetInnerRect() (int, int, int, int) {
+ x, y, width, height := v.Box.GetInnerRect()
+
+ // Left padding.
+ x = min(x+3, x+width-1)
+ width = max(width-3, 0)
+
+ return x, y, width, height
+}
+
+func (v *RecordsView) Draw(screen tcell.Screen) {
+ v.mu.RLock()
+ firstRecordIndex := v.firstRecordIndex
+ lastRecordIndex := v.lastRecordIndex
+ selectedRecordIndex := v.selectedRecordIndex
+ records := v.records
+ v.mu.RUnlock()
+
+ v.DrawForSubclass(screen, v)
+
+ x, y, width, height := v.GetInnerRect()
+ if height == 0 {
+ return
+ }
+
+ // No records in that bucket.
+ if firstRecordIndex == lastRecordIndex {
+ tview.Print(
+ screen, "Empty Bucket", x, y, width, tview.AlignCenter, tview.Styles.PrimaryTextColor,
+ )
+ return
+ }
+
+ for index := firstRecordIndex; index < lastRecordIndex; index++ {
+ result := records[index].Entry
+ text := result.String()
+
+ if index == selectedRecordIndex {
+ text = fmt.Sprintf("[:white]%s[:-]", text)
+ tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimitiveBackgroundColor)
+ } else {
+ tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimaryTextColor)
+ }
+
+ y++
+ }
+}
+
+func (v *RecordsView) InputHandler() func(event *tcell.EventKey, _ func(p tview.Primitive)) {
+ return v.WrapInputHandler(func(event *tcell.EventKey, _ func(p tview.Primitive)) {
+ switch m, k := event.Modifiers(), event.Key(); {
+ case m == 0 && k == tcell.KeyPgUp:
+ v.updateType = moveToPrevPage
+ case m == 0 && k == tcell.KeyPgDn:
+ v.updateType = moveToNextPage
+ case m == 0 && k == tcell.KeyUp:
+ v.updateType = moveUp
+ case m == 0 && k == tcell.KeyDown:
+ v.updateType = moveDown
+ case m == 0 && k == tcell.KeyHome:
+ v.updateType = moveHome
+ case m == 0 && k == tcell.KeyEnd:
+ v.updateType = moveEnd
+ case k == tcell.KeyEnter:
+ v.mu.RLock()
+ selectedRecordIndex := v.selectedRecordIndex
+ records := v.records
+ v.mu.RUnlock()
+ if len(records) != 0 {
+ current := records[selectedRecordIndex]
+ v.ui.moveNextPage(NewDetailedView(current.Entry.DetailedString()))
+ }
+ }
+ })
+}
diff --git a/cmd/frostfs-lens/internal/tui/types.go b/cmd/frostfs-lens/internal/tui/types.go
new file mode 100644
index 000000000..4a227fe64
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/types.go
@@ -0,0 +1,18 @@
+package tui
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+)
+
+type Bucket struct {
+ Name []byte
+ Path [][]byte
+ Entry common.SchemaEntry
+ NextParser common.Parser
+}
+
+type Record struct {
+ Key, Value []byte
+ Path [][]byte
+ Entry common.SchemaEntry
+}
diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go
new file mode 100644
index 000000000..701f2b331
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/ui.go
@@ -0,0 +1,548 @@
+package tui
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "github.com/davecgh/go-spew/spew"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+ "go.etcd.io/bbolt"
+)
+
+type Config struct {
+ LoadBufferSize int
+ SearchHistorySize int
+ LoadingIndicatorLag time.Duration
+}
+
+var DefaultConfig = Config{
+ LoadBufferSize: 100,
+ SearchHistorySize: 100,
+ LoadingIndicatorLag: 500 * time.Millisecond,
+}
+
+type Primitive interface {
+ tview.Primitive
+
+ Mount(ctx context.Context) error
+ Update(ctx context.Context) error
+ Unmount()
+}
+
+type UI struct {
+ *tview.Box
+
+ // Need to use a context while updating pages that read data from a database.
+ // The context should be shared among all mounts and updates. The current TUI
+ // library doesn't use contexts at all, so this feature is implemented manually.
+ //nolint:containedctx
+ ctx context.Context
+ onStop func()
+
+ app *tview.Application
+ db *bbolt.DB
+
+ pageHistory []Primitive
+ mountedPage Primitive
+
+ pageToMount Primitive
+
+ pageStub tview.Primitive
+
+ infoBar *tview.TextView
+ searchBar *InputFieldWithHistory
+ loadingBar *LoadingBar
+ helpBar *tview.TextView
+
+ searchErrorBar *tview.TextView
+
+ isSearching bool
+ isLoading atomic.Bool
+ isShowingError bool
+ isShowingHelp bool
+
+ loadBufferSize int
+
+ rootParser common.Parser
+
+ loadingIndicatorLag time.Duration
+
+ cancelLoading func()
+
+ filters map[string]func(string) (any, error)
+ compositeFilters map[string]func(string) (map[string]any, error)
+ filterHints map[string]string
+}
+
+func NewUI(
+ ctx context.Context,
+ app *tview.Application,
+ db *bbolt.DB,
+ rootParser common.Parser,
+ cfg *Config,
+) *UI {
+ spew.Config.DisableMethods = true
+
+ if cfg == nil {
+ cfg = &DefaultConfig
+ }
+
+ ui := &UI{
+ Box: tview.NewBox(),
+
+ app: app,
+ db: db,
+ rootParser: rootParser,
+
+ filters: make(map[string]func(string) (any, error)),
+ compositeFilters: make(map[string]func(string) (map[string]any, error)),
+ filterHints: make(map[string]string),
+
+ loadBufferSize: cfg.LoadBufferSize,
+ loadingIndicatorLag: cfg.LoadingIndicatorLag,
+ }
+
+ ui.ctx, ui.onStop = context.WithCancel(ctx)
+
+ backgroundColor := ui.GetBackgroundColor()
+ textColor := tview.Styles.PrimaryTextColor
+
+ inverseBackgroundColor := textColor
+ inverseTextColor := backgroundColor
+
+ alertTextColor := tcell.ColorRed
+
+ ui.pageStub = tview.NewBox()
+
+ ui.infoBar = tview.NewTextView()
+ ui.infoBar.SetBackgroundColor(inverseBackgroundColor)
+ ui.infoBar.SetTextColor(inverseTextColor)
+ ui.infoBar.SetText(
+ fmt.Sprintf(" %s (press h for help, q to quit) ", db.Path()),
+ )
+
+ ui.searchBar = NewInputFieldWithHistory(cfg.SearchHistorySize)
+ ui.searchBar.SetFieldBackgroundColor(backgroundColor)
+ ui.searchBar.SetFieldTextColor(textColor)
+ ui.searchBar.SetLabelColor(textColor)
+ ui.searchBar.Focus(nil)
+ ui.searchBar.SetLabel("/")
+
+ ui.searchErrorBar = tview.NewTextView()
+ ui.searchErrorBar.SetBackgroundColor(backgroundColor)
+ ui.searchErrorBar.SetTextColor(alertTextColor)
+
+ ui.helpBar = tview.NewTextView()
+ ui.helpBar.SetBackgroundColor(inverseBackgroundColor)
+ ui.helpBar.SetTextColor(inverseTextColor)
+ ui.helpBar.SetText(" Press Enter for next page or Escape to exit help ")
+
+ ui.loadingBar = NewLoadingBar(ui.triggerDraw)
+
+ ui.pageToMount = NewBucketsView(ui, NewFilter(nil))
+
+ return ui
+}
+
+func (ui *UI) checkFilterExists(typ string) bool {
+ if _, ok := ui.filters[typ]; ok {
+ return true
+ }
+ if _, ok := ui.compositeFilters[typ]; ok {
+ return true
+ }
+ return false
+}
+
+func (ui *UI) AddFilter(
+ typ string,
+ parser func(string) (any, error),
+ helpHint string,
+) error {
+ if ui.checkFilterExists(typ) {
+ return fmt.Errorf("filter %s already exists", typ)
+ }
+ ui.filters[typ] = parser
+ ui.filterHints[typ] = helpHint
+ return nil
+}
+
+func (ui *UI) AddCompositeFilter(
+ typ string,
+ parser func(string) (map[string]any, error),
+ helpHint string,
+) error {
+ if ui.checkFilterExists(typ) {
+ return fmt.Errorf("filter %s already exists", typ)
+ }
+ ui.compositeFilters[typ] = parser
+ ui.filterHints[typ] = helpHint
+ return nil
+}
+
+func (ui *UI) stopOnError(err error) {
+ if err != nil {
+ ui.onStop()
+ ui.app.QueueEvent(tcell.NewEventError(err))
+ }
+}
+
+func (ui *UI) stop() {
+ ui.onStop()
+ ui.app.Stop()
+}
+
+func (ui *UI) movePrevPage() {
+ if len(ui.pageHistory) != 0 {
+ ui.mountedPage.Unmount()
+ ui.mountedPage = ui.pageHistory[len(ui.pageHistory)-1]
+ ui.pageHistory = ui.pageHistory[:len(ui.pageHistory)-1]
+ ui.triggerDraw()
+ }
+}
+
+func (ui *UI) moveNextPage(page Primitive) {
+ ui.pageToMount = page
+ ui.triggerDraw()
+}
+
+func (ui *UI) triggerDraw() {
+ go ui.app.QueueUpdateDraw(func() {})
+}
+
+func (ui *UI) Draw(screen tcell.Screen) {
+ if ui.isLoading.Load() {
+ ui.draw(screen)
+ return
+ }
+
+ ui.isLoading.Store(true)
+
+ ctx, cancel := context.WithCancel(ui.ctx)
+
+ ready := make(chan struct{})
+ go func() {
+ ui.load(ctx)
+
+ cancel()
+ close(ready)
+ ui.isLoading.Store(false)
+ }()
+
+ select {
+ case <-ready:
+ case <-time.After(ui.loadingIndicatorLag):
+ ui.loadingBar.Start(ui.ctx)
+ ui.cancelLoading = cancel
+
+ go func() {
+ <-ready
+ ui.loadingBar.Stop()
+ ui.triggerDraw()
+ }()
+ }
+
+ ui.draw(screen)
+}
+
+func (ui *UI) load(ctx context.Context) {
+ if ui.mountedPage == nil && ui.pageToMount == nil {
+ ui.stop()
+ return
+ }
+
+ if ui.pageToMount != nil {
+ ui.mountAndUpdate(ctx)
+ } else {
+ ui.update(ctx)
+ }
+}
+
+func (ui *UI) draw(screen tcell.Screen) {
+ ui.DrawForSubclass(screen, ui)
+ x, y, width, height := ui.GetInnerRect()
+
+ var (
+ pageToDraw tview.Primitive
+ barToDraw tview.Primitive
+ )
+
+ switch {
+ case ui.isShowingHelp:
+ pageToDraw = ui.pageStub
+ case ui.mountedPage != nil:
+ pageToDraw = ui.mountedPage
+ default:
+ pageToDraw = ui.pageStub
+ }
+
+ pageToDraw.SetRect(x, y, width, height-1)
+ pageToDraw.Draw(screen)
+
+ // Search bar uses cursor and we need to hide it when another bar is drawn.
+ screen.HideCursor()
+
+ switch {
+ case ui.isLoading.Load():
+ barToDraw = ui.loadingBar
+ case ui.isSearching:
+ barToDraw = ui.searchBar
+ case ui.isShowingError:
+ barToDraw = ui.searchErrorBar
+ case ui.isShowingHelp:
+ barToDraw = ui.helpBar
+ default:
+ barToDraw = ui.infoBar
+ }
+
+ barToDraw.SetRect(x, y+height-1, width, 1)
+ barToDraw.Draw(screen)
+}
+
+func (ui *UI) mountAndUpdate(ctx context.Context) {
+ defer func() {
+ // Operation succeeded or was canceled, either way reset page to mount.
+ ui.pageToMount = nil
+ }()
+
+ // Mount should use app global context.
+ //nolint:contextcheck
+ err := ui.pageToMount.Mount(ui.ctx)
+ if err != nil {
+ ui.stopOnError(err)
+ return
+ }
+
+ x, y, width, height := ui.GetInnerRect()
+ ui.pageToMount.SetRect(x, y, width, height-1)
+
+ s := loadOp(ctx, ui.pageToMount.Update)
+ if s.err != nil {
+ ui.pageToMount.Unmount()
+ ui.stopOnError(s.err)
+ return
+ }
+ // Update was canceled.
+ if !s.done {
+ ui.pageToMount.Unmount()
+ return
+ }
+
+ if ui.mountedPage != nil {
+ ui.pageHistory = append(ui.pageHistory, ui.mountedPage)
+ }
+ ui.mountedPage = ui.pageToMount
+}
+
+func (ui *UI) update(ctx context.Context) {
+ x, y, width, height := ui.GetInnerRect()
+ ui.mountedPage.SetRect(x, y, width, height-1)
+
+ s := loadOp(ctx, ui.mountedPage.Update)
+ if s.err != nil {
+ ui.stopOnError(s.err)
+ return
+ }
+}
+
+type status struct {
+ done bool
+ err error
+}
+
+func loadOp(ctx context.Context, op func(ctx context.Context) error) status {
+ errCh := make(chan error)
+ go func() {
+ errCh <- op(ctx)
+ }()
+
+ select {
+ case <-ctx.Done():
+ return status{done: false, err: nil}
+ case err := <-errCh:
+ return status{done: true, err: err}
+ }
+}
+
+func (ui *UI) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
+ return ui.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
+ switch {
+ case ui.isLoading.Load():
+ ui.handleInputOnLoading(event)
+ case ui.isShowingHelp:
+ ui.handleInputOnShowingHelp(event)
+ case ui.isShowingError:
+ ui.handleInputOnShowingError()
+ case ui.isSearching:
+ ui.handleInputOnSearching(event)
+ default:
+ ui.handleInput(event)
+ }
+ })
+}
+
+func (ui *UI) handleInput(event *tcell.EventKey) {
+ m, k, r := event.Modifiers(), event.Key(), event.Rune()
+
+ switch {
+ case k == tcell.KeyEsc:
+ ui.movePrevPage()
+ case m == 0 && k == tcell.KeyRune && r == 'h':
+ ui.isShowingHelp = true
+ case m == 0 && k == tcell.KeyRune && r == '/':
+ ui.isSearching = true
+ case m == 0 && k == tcell.KeyRune && r == 'q':
+ ui.stop()
+ default:
+ if ui.mountedPage != nil {
+ ui.mountedPage.InputHandler()(event, func(tview.Primitive) {})
+ }
+ }
+}
+
+func (ui *UI) handleInputOnLoading(event *tcell.EventKey) {
+ switch k, r := event.Key(), event.Rune(); {
+ case k == tcell.KeyEsc:
+ ui.cancelLoading()
+ case k == tcell.KeyRune && r == 'q':
+ ui.stop()
+ }
+}
+
+func (ui *UI) handleInputOnShowingError() {
+ ui.isShowingError = false
+ ui.isSearching = true
+}
+
+func (ui *UI) handleInputOnShowingHelp(event *tcell.EventKey) {
+ k, r := event.Key(), event.Rune()
+
+ switch {
+ case k == tcell.KeyEsc:
+ ui.isShowingHelp = false
+ case k == tcell.KeyRune && r == 'q':
+ ui.stop()
+ default:
+ }
+}
+
+func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
+ m, k := event.Modifiers(), event.Key()
+
+ switch {
+ case k == tcell.KeyEnter:
+ prompt := ui.searchBar.GetText()
+
+ res, err := ui.processPrompt(prompt)
+ if err != nil {
+ ui.isShowingError = true
+ ui.isSearching = false
+ ui.searchErrorBar.SetText(err.Error() + " (press any key to continue)")
+ return
+ }
+
+ switch ui.mountedPage.(type) {
+ case *BucketsView:
+ ui.moveNextPage(NewBucketsView(ui, res))
+ case *RecordsView:
+ bucket := ui.mountedPage.(*RecordsView).bucket
+ ui.moveNextPage(NewRecordsView(ui, bucket, res))
+ }
+
+ if ui.searchBar.GetText() != "" {
+ ui.searchBar.AddToHistory(ui.searchBar.GetText())
+ }
+
+ ui.searchBar.SetText("")
+ ui.isSearching = false
+ case k == tcell.KeyEsc:
+ ui.isSearching = false
+ case (k == tcell.KeyBackspace2 || m&tcell.ModCtrl != 0 && k == tcell.KeyETB) && len(ui.searchBar.GetText()) == 0:
+ ui.isSearching = false
+ default:
+ ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
+ }
+
+ ui.Box.MouseHandler()
+}
+
+func (ui *UI) WithPrompt(prompt string) error {
+ filter, err := ui.processPrompt(prompt)
+ if err != nil {
+ return err
+ }
+
+ ui.pageToMount = NewBucketsView(ui, filter)
+
+ if prompt != "" {
+ ui.searchBar.AddToHistory(prompt)
+ }
+
+ return nil
+}
+
+func (ui *UI) processPrompt(prompt string) (filter *Filter, err error) {
+ if prompt == "" {
+ return NewFilter(nil), nil
+ }
+
+ filterMap := make(map[string]any)
+
+ for _, filterString := range strings.Split(prompt, "+") {
+ parts := strings.Split(filterString, ":")
+ if len(parts) != 2 {
+ return nil, errors.New("expected 'tag:value [+ tag:value]...'")
+ }
+
+ filterTag := strings.TrimSpace(parts[0])
+ filterValueString := strings.TrimSpace(parts[1])
+
+ if _, exists := filterMap[filterTag]; exists {
+ return nil, fmt.Errorf("duplicate filter tag '%s'", filterTag)
+ }
+
+ parser, ok := ui.filters[filterTag]
+ if ok {
+ filterValue, err := parser(filterValueString)
+ if err != nil {
+ return nil, fmt.Errorf("can't parse '%s' filter value: %w", filterTag, err)
+ }
+
+ filterMap[filterTag] = filterValue
+ continue
+ }
+
+ compositeParser, ok := ui.compositeFilters[filterTag]
+ if ok {
+ compositeFilterValue, err := compositeParser(filterValueString)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "can't parse '%s' filter value '%s': %w",
+ filterTag, filterValueString, err,
+ )
+ }
+
+ for tag, value := range compositeFilterValue {
+ if _, exists := filterMap[tag]; exists {
+ return nil, fmt.Errorf(
+ "found duplicate filter tag '%s' while processing composite filter with tag '%s'",
+ tag, filterTag,
+ )
+ }
+
+ filterMap[tag] = value
+ }
+ continue
+ }
+
+ return nil, fmt.Errorf("unknown filter tag '%s'", filterTag)
+ }
+
+ return NewFilter(filterMap), nil
+}
diff --git a/cmd/frostfs-lens/internal/tui/util.go b/cmd/frostfs-lens/internal/tui/util.go
new file mode 100644
index 000000000..d4e13b2a9
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/util.go
@@ -0,0 +1,97 @@
+package tui
+
+import (
+ "errors"
+ "strings"
+
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/mr-tron/base58"
+)
+
+func CIDParser(s string) (any, error) {
+ data, err := base58.Decode(s)
+ if err != nil {
+ return nil, err
+ }
+ var id cid.ID
+ if err = id.Decode(data); err != nil {
+ return nil, err
+ }
+ return id, nil
+}
+
+func OIDParser(s string) (any, error) {
+ data, err := base58.Decode(s)
+ if err != nil {
+ return nil, err
+ }
+ var id oid.ID
+ if err = id.Decode(data); err != nil {
+ return nil, err
+ }
+ return id, nil
+}
+
+func AddressParser(s string) (map[string]any, error) {
+ m := make(map[string]any)
+
+ parts := strings.Split(s, "/")
+ if len(parts) != 2 {
+ return nil, errors.New("expected /")
+ }
+ cnr, err := CIDParser(parts[0])
+ if err != nil {
+ return nil, err
+ }
+ obj, err := OIDParser(parts[1])
+ if err != nil {
+ return nil, err
+ }
+
+ m["cid"] = cnr
+ m["oid"] = obj
+
+ return m, nil
+}
+
+func keyParser(s string) (any, error) {
+ if s == "" {
+ return nil, errors.New("empty attribute key")
+ }
+ return s, nil
+}
+
+func valueParser(s string) (any, error) {
+ if s == "" {
+ return nil, errors.New("empty attribute value")
+ }
+ return s, nil
+}
+
+func AttributeParser(s string) (map[string]any, error) {
+ m := make(map[string]any)
+
+ parts := strings.Split(s, "/")
+ if len(parts) != 1 && len(parts) != 2 {
+ return nil, errors.New("expected or /")
+ }
+
+ key, err := keyParser(parts[0])
+ if err != nil {
+ return nil, err
+ }
+ m["key"] = key
+
+ if len(parts) == 1 {
+ return m, nil
+ }
+
+ value, err := valueParser(parts[1])
+ if err != nil {
+ return nil, err
+ }
+ m["value"] = value
+
+ return m, nil
+}
diff --git a/go.mod b/go.mod
index be3c6e74d..93eef5b8c 100644
--- a/go.mod
+++ b/go.mod
@@ -32,6 +32,7 @@ require (
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0
github.com/prometheus/client_golang v1.19.0
+ github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130
github.com/spf13/cast v1.6.0
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
@@ -106,7 +107,7 @@ require (
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
- github.com/rivo/uniseg v0.4.4 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
diff --git a/go.sum b/go.sum
index d0218a348..102501484 100644
--- a/go.sum
+++ b/go.sum
@@ -223,10 +223,12 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130 h1:o1CYtoFOm6xJK3DvDAEG5wDJPLj+SoxUtUDFaQgt1iY=
+github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130/go.mod h1:02iFIz7K/A9jGCvrizLPvoqr4cEIx7q54RH5Qudkrss=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
-github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
From e6553363905ef350b9faf12e7a42d52cf624815c Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Fri, 16 Aug 2024 17:27:35 +0300
Subject: [PATCH 075/705] [#1223] lens/tui: Add app help
Signed-off-by: Aleksey Savchuk
---
.../internal/tui/help-pages/hotkeys.txt | 38 +++++++
.../internal/tui/help-pages/searching.txt | 26 +++++
cmd/frostfs-lens/internal/tui/help.go | 101 ++++++++++++++++++
cmd/frostfs-lens/internal/tui/ui.go | 15 ++-
4 files changed, 179 insertions(+), 1 deletion(-)
create mode 100644 cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt
create mode 100644 cmd/frostfs-lens/internal/tui/help-pages/searching.txt
create mode 100644 cmd/frostfs-lens/internal/tui/help.go
diff --git a/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt b/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt
new file mode 100644
index 000000000..c371b34e9
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt
@@ -0,0 +1,38 @@
+[green::b]HOTKEYS[-::-]
+
+ [green::b]Navigation[-::-]
+
+ [yellow::b]Down Arrow[-::-] / [yellow::b]j[-::-]
+ Scroll down.
+
+ [yellow::b]Up Arrow[-::-] / [yellow::b]k[-::-]
+ Scroll up.
+
+ [yellow::b]Page Down[-::-] / [yellow::b]Ctrl-f[-::-]
+ Scroll down by a full page.
+
+ [yellow::b]Page Up[-::-] / [yellow::b]Ctrl-b[-::-]
+ Scroll up by a full page.
+
+ [green::b]Actions[-::-]
+
+ [yellow::b]Enter[-::-]
+ Perform actions based on the current context:
+ - In Buckets View:
+ - Expand/collapse the selected bucket to show/hide its nested buckets.
+ - If no nested buckets exist, navigate to the selected bucket's records.
+ - In Records View: Open the detailed view of the selected record.
+
+ [yellow::b]Escape[-::-]
+ Return to the previous page, opposite of [yellow::b]Enter[-::-].
+
+ Refer to the [green::b]SEARCHING[-::-] section for more specific actions.
+
+
+ [green::b]Alternative Action Hotkeys[-::-]
+
+ [yellow::b]Ctrl-r[-::-]
+ Directly navigate to the selected bucket's records.
+
+ [yellow::b]Ctrl-d[-::-]
+ Access the detailed view of the selected bucket.
diff --git a/cmd/frostfs-lens/internal/tui/help-pages/searching.txt b/cmd/frostfs-lens/internal/tui/help-pages/searching.txt
new file mode 100644
index 000000000..bc2be512b
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/help-pages/searching.txt
@@ -0,0 +1,26 @@
+[green::b]SEARCHING[-::-]
+
+ [green::b]Hotkeys[-::-]
+
+ [yellow::b]/[-::-]
+ Initiate the search prompt.
+ - The prompt follows this syntax: [yellow::b]tag:value [+ tag:value]...[-::-]
+ - Multiple filters can be combined with [yellow::b]+[-::-]; the result is the intersection of those filters' result sets.
+ - Any leading and trailing whitespace will be ignored.
+ - An empty prompt will return all results with no filters applied.
+ - Refer to the [green::b]Available Search Filters[-::-] section below for a list of valid filter tags.
+
+ [yellow::b]Enter[-::-]
+ Execute the search based on the entered prompt.
+ - If the prompt is invalid, an error message will be displayed.
+
+ [yellow::b]Escape[-::-]
+ Exit the search prompt without performing a search.
+
+ [yellow::b]Down Arrow[-::-], [yellow::b]Up Arrow[-::-]
+ Scroll through the search history.
+
+
+ [green::b]Available Search Filters[-::-]
+
+%s
diff --git a/cmd/frostfs-lens/internal/tui/help.go b/cmd/frostfs-lens/internal/tui/help.go
new file mode 100644
index 000000000..3ab8fede0
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/help.go
@@ -0,0 +1,101 @@
+package tui
+
+import (
+ _ "embed"
+ "fmt"
+ "strings"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+var (
+ //go:embed help-pages/hotkeys.txt
+ hotkeysHelpText string
+
+ //go:embed help-pages/searching.txt
+ searchingHelpText string
+)
+
+type HelpPage struct {
+ *tview.Box
+ pages []*tview.TextView
+ currentPage int
+
+ filters []string
+ filterHints map[string]string
+}
+
+func NewHelpPage(filters []string, hints map[string]string) *HelpPage {
+ hp := &HelpPage{
+ Box: tview.NewBox(),
+ filters: filters,
+ filterHints: hints,
+ }
+
+ page := tview.NewTextView().
+ SetDynamicColors(true).
+ SetText(hotkeysHelpText)
+ hp.addPage(page)
+
+ page = tview.NewTextView().
+ SetDynamicColors(true).
+ SetText(fmt.Sprintf(searchingHelpText, hp.getFiltersText()))
+ hp.addPage(page)
+
+ return hp
+}
+
+func (hp *HelpPage) addPage(page *tview.TextView) {
+ hp.pages = append(hp.pages, page)
+}
+
+func (hp *HelpPage) getFiltersText() string {
+ if len(hp.filters) == 0 {
+ return "\t\tNo filters defined.\n"
+ }
+
+ filtersText := strings.Builder{}
+ gapSize := 4
+
+ tagMaxWidth := 3
+ for _, filter := range hp.filters {
+ tagMaxWidth = max(tagMaxWidth, len(filter))
+ }
+ filtersText.WriteString("\t\t[yellow::b]Tag")
+ filtersText.WriteString(strings.Repeat(" ", gapSize))
+ filtersText.WriteString("\tValue[-::-]\n\n")
+
+ for _, filter := range hp.filters {
+ filtersText.WriteString("\t\t")
+ filtersText.WriteString(filter)
+ filtersText.WriteString(strings.Repeat(" ", tagMaxWidth-len(filter)+gapSize))
+ filtersText.WriteString(hp.filterHints[filter])
+ filtersText.WriteRune('\n')
+ }
+
+ return filtersText.String()
+}
+
+func (hp *HelpPage) Draw(screen tcell.Screen) {
+ x, y, width, height := hp.GetInnerRect()
+ hp.pages[hp.currentPage].SetRect(x+1, y+1, width-2, height-2)
+ hp.pages[hp.currentPage].Draw(screen)
+}
+
+func (hp *HelpPage) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
+ return hp.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
+ if event.Key() == tcell.KeyEnter {
+ hp.currentPage++
+ hp.currentPage %= len(hp.pages)
+ return
+ }
+ hp.pages[hp.currentPage].InputHandler()(event, func(tview.Primitive) {})
+ })
+}
+
+func (hp *HelpPage) MouseHandler() func(action tview.MouseAction, event *tcell.EventMouse, setFocus func(p tview.Primitive)) (consumed bool, capture tview.Primitive) {
+ return hp.WrapMouseHandler(func(action tview.MouseAction, event *tcell.EventMouse, _ func(tview.Primitive)) (consumed bool, capture tview.Primitive) {
+ return hp.pages[hp.currentPage].MouseHandler()(action, event, func(tview.Primitive) {})
+ })
+}
diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go
index 701f2b331..bcc082821 100644
--- a/cmd/frostfs-lens/internal/tui/ui.go
+++ b/cmd/frostfs-lens/internal/tui/ui.go
@@ -60,6 +60,8 @@ type UI struct {
loadingBar *LoadingBar
helpBar *tview.TextView
+ helpPage *HelpPage
+
searchErrorBar *tview.TextView
isSearching bool
@@ -275,7 +277,17 @@ func (ui *UI) draw(screen tcell.Screen) {
switch {
case ui.isShowingHelp:
- pageToDraw = ui.pageStub
+ if ui.helpPage == nil {
+ var filters []string
+ for f := range ui.filters {
+ filters = append(filters, f)
+ }
+ for f := range ui.compositeFilters {
+ filters = append(filters, f)
+ }
+ ui.helpPage = NewHelpPage(filters, ui.filterHints)
+ }
+ pageToDraw = ui.helpPage
case ui.mountedPage != nil:
pageToDraw = ui.mountedPage
default:
@@ -429,6 +441,7 @@ func (ui *UI) handleInputOnShowingHelp(event *tcell.EventKey) {
case k == tcell.KeyRune && r == 'q':
ui.stop()
default:
+ ui.helpPage.InputHandler()(event, func(tview.Primitive) {})
}
}
From 371d97f61adc7cc74da815764a75a5438e865eda Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 19 Aug 2024 18:02:11 +0300
Subject: [PATCH 076/705] [#1223] lens/tui: Add TUI app for write cache
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-lens/internal/writecache/root.go | 2 +-
cmd/frostfs-lens/internal/writecache/tui.go | 79 ++++++++++++++++++++
2 files changed, 80 insertions(+), 1 deletion(-)
create mode 100644 cmd/frostfs-lens/internal/writecache/tui.go
diff --git a/cmd/frostfs-lens/internal/writecache/root.go b/cmd/frostfs-lens/internal/writecache/root.go
index eb3b325b6..d7d6db240 100644
--- a/cmd/frostfs-lens/internal/writecache/root.go
+++ b/cmd/frostfs-lens/internal/writecache/root.go
@@ -17,5 +17,5 @@ var Root = &cobra.Command{
}
func init() {
- Root.AddCommand(listCMD, inspectCMD)
+ Root.AddCommand(listCMD, inspectCMD, tuiCMD)
}
diff --git a/cmd/frostfs-lens/internal/writecache/tui.go b/cmd/frostfs-lens/internal/writecache/tui.go
new file mode 100644
index 000000000..6b7532b08
--- /dev/null
+++ b/cmd/frostfs-lens/internal/writecache/tui.go
@@ -0,0 +1,79 @@
+package writecache
+
+import (
+ "context"
+ "fmt"
+
+ common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
+ schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
+ "github.com/rivo/tview"
+ "github.com/spf13/cobra"
+ "go.etcd.io/bbolt"
+)
+
+var tuiCMD = &cobra.Command{
+ Use: "explore",
+ Short: "Write cache exploration with a terminal UI",
+ Long: `Launch a terminal UI to explore write cache and search for data.
+
+Available search filters:
+- cid CID
+- oid OID
+- addr CID/OID
+`,
+ Run: tuiFunc,
+}
+
+var initialPrompt string
+
+func init() {
+ common.AddComponentPathFlag(tuiCMD, &vPath)
+
+ tuiCMD.Flags().StringVar(
+ &initialPrompt,
+ "filter",
+ "",
+ "Filter prompt to start with, format 'tag:value [+ tag:value]...'",
+ )
+}
+
+func tuiFunc(cmd *cobra.Command, _ []string) {
+ common.ExitOnErr(cmd, runTUI(cmd))
+}
+
+func runTUI(cmd *cobra.Command) error {
+ db, err := openDB(false)
+ if err != nil {
+ return fmt.Errorf("couldn't open database: %w", err)
+ }
+ defer db.Close()
+
+ ctx, cancel := context.WithCancel(cmd.Context())
+ defer cancel()
+
+ app := tview.NewApplication()
+ ui := tui.NewUI(ctx, app, db, schema.WritecacheParser, nil)
+
+ _ = ui.AddFilter("cid", tui.CIDParser, "CID")
+ _ = ui.AddFilter("oid", tui.OIDParser, "OID")
+ _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID")
+
+ err = ui.WithPrompt(initialPrompt)
+ if err != nil {
+ return fmt.Errorf("invalid filter prompt: %w", err)
+ }
+
+ app.SetRoot(ui, true).SetFocus(ui)
+ return app.Run()
+}
+
+func openDB(writable bool) (*bbolt.DB, error) {
+ db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
+ ReadOnly: !writable,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
From 7768a482b595e578570f6b3c705b6f7754705fab Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 22 Aug 2024 15:07:51 +0300
Subject: [PATCH 077/705] [#1223] lens/tui: Add TUI app for blobovnicza
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-lens/internal/blobovnicza/root.go | 2 +-
cmd/frostfs-lens/internal/blobovnicza/tui.go | 79 ++++++++++++++
.../internal/schema/blobovnicza/parsers.go | 96 +++++++++++++++++
.../internal/schema/blobovnicza/types.go | 101 ++++++++++++++++++
4 files changed, 277 insertions(+), 1 deletion(-)
create mode 100644 cmd/frostfs-lens/internal/blobovnicza/tui.go
create mode 100644 cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go
create mode 100644 cmd/frostfs-lens/internal/schema/blobovnicza/types.go
diff --git a/cmd/frostfs-lens/internal/blobovnicza/root.go b/cmd/frostfs-lens/internal/blobovnicza/root.go
index 0a0cd955d..9d8ef3dad 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/root.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/root.go
@@ -19,7 +19,7 @@ var Root = &cobra.Command{
}
func init() {
- Root.AddCommand(listCMD, inspectCMD)
+ Root.AddCommand(listCMD, inspectCMD, tuiCMD)
}
func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
diff --git a/cmd/frostfs-lens/internal/blobovnicza/tui.go b/cmd/frostfs-lens/internal/blobovnicza/tui.go
new file mode 100644
index 000000000..eb4a5ff59
--- /dev/null
+++ b/cmd/frostfs-lens/internal/blobovnicza/tui.go
@@ -0,0 +1,79 @@
+package blobovnicza
+
+import (
+ "context"
+ "fmt"
+
+ common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
+ schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/blobovnicza"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
+ "github.com/rivo/tview"
+ "github.com/spf13/cobra"
+ "go.etcd.io/bbolt"
+)
+
+var tuiCMD = &cobra.Command{
+ Use: "explore",
+ Short: "Blobovnicza exploration with a terminal UI",
+ Long: `Launch a terminal UI to explore blobovnicza and search for data.
+
+Available search filters:
+- cid CID
+- oid OID
+- addr CID/OID
+`,
+ Run: tuiFunc,
+}
+
+var initialPrompt string
+
+func init() {
+ common.AddComponentPathFlag(tuiCMD, &vPath)
+
+ tuiCMD.Flags().StringVar(
+ &initialPrompt,
+ "filter",
+ "",
+ "Filter prompt to start with, format 'tag:value [+ tag:value]...'",
+ )
+}
+
+func tuiFunc(cmd *cobra.Command, _ []string) {
+ common.ExitOnErr(cmd, runTUI(cmd))
+}
+
+func runTUI(cmd *cobra.Command) error {
+ db, err := openDB(false)
+ if err != nil {
+ return fmt.Errorf("couldn't open database: %w", err)
+ }
+ defer db.Close()
+
+ ctx, cancel := context.WithCancel(cmd.Context())
+ defer cancel()
+
+ app := tview.NewApplication()
+ ui := tui.NewUI(ctx, app, db, schema.BlobovniczaParser, nil)
+
+ _ = ui.AddFilter("cid", tui.CIDParser, "CID")
+ _ = ui.AddFilter("oid", tui.OIDParser, "OID")
+ _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID")
+
+ err = ui.WithPrompt(initialPrompt)
+ if err != nil {
+ return fmt.Errorf("invalid filter prompt: %w", err)
+ }
+
+ app.SetRoot(ui, true).SetFocus(ui)
+ return app.Run()
+}
+
+func openDB(writable bool) (*bbolt.DB, error) {
+ db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
+ ReadOnly: !writable,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
diff --git a/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go b/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go
new file mode 100644
index 000000000..02b6cf414
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go
@@ -0,0 +1,96 @@
+package blobovnicza
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/mr-tron/base58"
+)
+
+var BlobovniczaParser = common.WithFallback(
+ common.Any(
+ MetaBucketParser,
+ BucketParser,
+ ),
+ common.RawParser.ToFallbackParser(),
+)
+
+func MetaBucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, errors.New("not a bucket")
+ }
+
+ if string(key) != "META" {
+ return nil, nil, errors.New("invalid bucket name")
+ }
+
+ return &MetaBucket{}, MetaRecordParser, nil
+}
+
+func MetaRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var r MetaRecord
+
+ if len(key) == 0 {
+ return nil, nil, errors.New("invalid key")
+ }
+
+ r.label = string(key)
+ r.count = binary.LittleEndian.Uint64(value)
+
+ return &r, nil, nil
+}
+
+func BucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, errors.New("not a bucket")
+ }
+
+ size, n := binary.Varint(key)
+ if n <= 0 {
+ return nil, nil, errors.New("invalid size")
+ }
+
+ return &Bucket{size: size}, RecordParser, nil
+}
+
+func RecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ parts := strings.Split(string(key), "/")
+
+ if len(parts) != 2 {
+ return nil, nil, errors.New("invalid key, expected address string /")
+ }
+
+ cnrRaw, err := base58.Decode(parts[0])
+ if err != nil {
+ return nil, nil, errors.New("can't decode CID string")
+ }
+ objRaw, err := base58.Decode(parts[1])
+ if err != nil {
+ return nil, nil, errors.New("can't decode OID string")
+ }
+
+ cnr := cid.ID{}
+ if err := cnr.Decode(cnrRaw); err != nil {
+ return nil, nil, fmt.Errorf("can't decode CID: %w", err)
+ }
+ obj := oid.ID{}
+ if err := obj.Decode(objRaw); err != nil {
+ return nil, nil, fmt.Errorf("can't decode OID: %w", err)
+ }
+
+ var r Record
+
+ r.addr.SetContainer(cnr)
+ r.addr.SetObject(obj)
+
+ if err := r.object.Unmarshal(value); err != nil {
+ return nil, nil, errors.New("can't unmarshal object")
+ }
+
+ return &r, nil, nil
+}
diff --git a/cmd/frostfs-lens/internal/schema/blobovnicza/types.go b/cmd/frostfs-lens/internal/schema/blobovnicza/types.go
new file mode 100644
index 000000000..c7ed08cdd
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/blobovnicza/types.go
@@ -0,0 +1,101 @@
+package blobovnicza
+
+import (
+ "fmt"
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/davecgh/go-spew/spew"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+type (
+ MetaBucket struct{}
+
+ MetaRecord struct {
+ label string
+ count uint64
+ }
+
+ Bucket struct {
+ size int64
+ }
+
+ Record struct {
+ addr oid.Address
+ object objectSDK.Object
+ }
+)
+
+func (b *MetaBucket) String() string {
+ return common.FormatSimple("META", tcell.ColorLime)
+}
+
+func (b *MetaBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *MetaBucket) Filter(string, any) common.FilterResult {
+ return common.No
+}
+
+func (r *MetaRecord) String() string {
+ return fmt.Sprintf("%-11s %c %d", r.label, tview.Borders.Vertical, r.count)
+}
+
+func (r *MetaRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *MetaRecord) Filter(string, any) common.FilterResult {
+ return common.No
+}
+
+func (b *Bucket) String() string {
+ return common.FormatSimple(strconv.FormatInt(b.size, 10), tcell.ColorLime)
+}
+
+func (b *Bucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *Bucket) Filter(typ string, _ any) common.FilterResult {
+ switch typ {
+ case "cid":
+ return common.Maybe
+ case "oid":
+ return common.Maybe
+ default:
+ return common.No
+ }
+}
+
+func (r *Record) String() string {
+ return fmt.Sprintf(
+ "CID %s OID %s %c Object {...}",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua),
+ common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ )
+}
+
+func (r *Record) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *Record) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
From b3deb893ba26aa3ec9ce93213cef16243cc0f58d Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Fri, 30 Aug 2024 12:09:14 +0300
Subject: [PATCH 078/705] [#1310] object: Move target initialization to
separate package
* Split the logic of write target initialization into different packages;
* Refactor patch and put services, since both services initialize the target
themselves.
Signed-off-by: Airat Arifullin
---
cmd/frostfs-node/cache.go | 6 +-
cmd/frostfs-node/object.go | 17 +-
.../object/{put => common/target}/builder.go | 2 +-
.../object/{put => common/target}/pool.go | 2 +-
pkg/services/object/common/target/target.go | 170 +++++++++++
.../{put => common/target}/validation.go | 2 +-
.../object/{put => common/writer}/common.go | 42 +--
.../writer.go => common/writer/dispatcher.go} | 2 +-
.../{put => common/writer}/distributed.go | 70 ++---
.../object/{put => common/writer}/ec.go | 104 +++----
.../object/{put => common/writer}/local.go | 14 +-
.../object/{put => common/writer}/remote.go | 12 +-
pkg/services/object/common/writer/writer.go | 183 +++++++++++
pkg/services/object/patch/service.go | 25 +-
pkg/services/object/patch/streamer.go | 28 +-
pkg/services/object/patch/util.go | 19 --
pkg/services/object/put/service.go | 114 ++-----
pkg/services/object/put/single.go | 64 ++--
pkg/services/object/put/streamer.go | 289 ++----------------
pkg/services/object/put/v2/streamer.go | 9 +-
pkg/services/replicator/process.go | 4 +-
pkg/services/replicator/replicator.go | 6 +-
22 files changed, 599 insertions(+), 585 deletions(-)
rename pkg/services/object/{put => common/target}/builder.go (98%)
rename pkg/services/object/{put => common/target}/pool.go (96%)
create mode 100644 pkg/services/object/common/target/target.go
rename pkg/services/object/{put => common/target}/validation.go (99%)
rename pkg/services/object/{put => common/writer}/common.go (65%)
rename pkg/services/object/{put/writer.go => common/writer/dispatcher.go} (97%)
rename pkg/services/object/{put => common/writer}/distributed.go (57%)
rename pkg/services/object/{put => common/writer}/ec.go (69%)
rename pkg/services/object/{put => common/writer}/local.go (81%)
rename pkg/services/object/{put => common/writer}/remote.go (92%)
create mode 100644 pkg/services/object/common/writer/writer.go
diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go
index 81d552729..57f65d873 100644
--- a/cmd/frostfs-node/cache.go
+++ b/cmd/frostfs-node/cache.go
@@ -7,7 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -305,11 +305,11 @@ type ttlMaxObjectSizeCache struct {
mtx sync.RWMutex
lastUpdated time.Time
lastSize uint64
- src putsvc.MaxSizeSource
+ src objectwriter.MaxSizeSource
metrics cacheMetrics
}
-func newCachedMaxObjectSizeSource(src putsvc.MaxSizeSource) putsvc.MaxSizeSource {
+func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.MaxSizeSource {
return &ttlMaxObjectSizeCache{
src: src,
metrics: metrics.NewCacheMetrics("max_object_size"),
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 467c5901b..610e2c363 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -24,6 +24,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl"
v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
deletesvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete/v2"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
@@ -188,7 +189,7 @@ func initObjectService(c *cfg) {
sDeleteV2 := createDeleteServiceV2(sDelete)
- sPatch := createPatchSvc(sGet, sPut, keyStorage)
+ sPatch := createPatchSvc(sGet, sPut)
// build service pipeline
// grpc | audit | | signature | response | acl | ape | split
@@ -326,7 +327,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa
),
replicator.WithLocalStorage(ls),
replicator.WithRemoteSender(
- putsvc.NewRemoteSender(keyStorage, cache),
+ objectwriter.NewRemoteSender(keyStorage, cache),
),
replicator.WithRemoteGetter(
getsvc.NewRemoteGetter(c.clientCache, c.netMapSource, keyStorage),
@@ -338,7 +339,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa
func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetcher) *putsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
- var os putsvc.ObjectStorage = engineWithoutNotifications{
+ var os objectwriter.ObjectStorage = engineWithoutNotifications{
engine: ls,
}
@@ -352,9 +353,9 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
c,
c.cfgNetmap.state,
irFetcher,
- putsvc.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
- putsvc.WithLogger(c.log),
- putsvc.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
+ objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
+ objectwriter.WithLogger(c.log),
+ objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
)
}
@@ -362,8 +363,8 @@ func createPutSvcV2(sPut *putsvc.Service, keyStorage *util.KeyStorage) *putsvcV2
return putsvcV2.NewService(sPut, keyStorage)
}
-func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service, keyStorage *util.KeyStorage) *patchsvc.Service {
- return patchsvc.NewService(keyStorage, sGet, sPut)
+func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service) *patchsvc.Service {
+ return patchsvc.NewService(sPut.Config, sGet)
}
func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service {
diff --git a/pkg/services/object/put/builder.go b/pkg/services/object/common/target/builder.go
similarity index 98%
rename from pkg/services/object/put/builder.go
rename to pkg/services/object/common/target/builder.go
index 64baf4e05..ea68365a7 100644
--- a/pkg/services/object/put/builder.go
+++ b/pkg/services/object/common/target/builder.go
@@ -1,4 +1,4 @@
-package putsvc
+package target
import (
"context"
diff --git a/pkg/services/object/put/pool.go b/pkg/services/object/common/target/pool.go
similarity index 96%
rename from pkg/services/object/put/pool.go
rename to pkg/services/object/common/target/pool.go
index ebe214caf..71da305ad 100644
--- a/pkg/services/object/put/pool.go
+++ b/pkg/services/object/common/target/pool.go
@@ -1,4 +1,4 @@
-package putsvc
+package target
import (
"sync"
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
new file mode 100644
index 000000000..00080ace6
--- /dev/null
+++ b/pkg/services/object/common/target/target.go
@@ -0,0 +1,170 @@
+package target
+
+import (
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+func New(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ // prepare needed put parameters
+ if err := preparePrm(prm); err != nil {
+ return nil, fmt.Errorf("could not prepare put parameters: %w", err)
+ }
+
+ if prm.Header.Signature() != nil {
+ return newUntrustedTarget(prm)
+ }
+ return newTrustedTarget(prm)
+}
+
+func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
+ if maxPayloadSz == 0 {
+ return nil, errors.New("could not obtain max object size parameter")
+ }
+
+ if prm.SignRequestPrivateKey == nil {
+ nodeKey, err := prm.Config.KeyStorage.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
+ prm.SignRequestPrivateKey = nodeKey
+ }
+
+ // prepare untrusted-Put object target
+ return &validatingPreparedTarget{
+ nextTarget: newInMemoryObjectBuilder(objectwriter.New(prm)),
+ fmt: prm.Config.FormatValidator,
+
+ maxPayloadSz: maxPayloadSz,
+ }, nil
+}
+
+func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
+ if maxPayloadSz == 0 {
+ return nil, errors.New("could not obtain max object size parameter")
+ }
+
+ sToken := prm.Common.SessionToken()
+
+ // prepare trusted-Put object target
+
+ // get private token from local storage
+ var sessionInfo *util.SessionInfo
+
+ if sToken != nil {
+ sessionInfo = &util.SessionInfo{
+ ID: sToken.ID(),
+ Owner: sToken.Issuer(),
+ }
+ }
+
+ key, err := prm.Config.KeyStorage.GetKey(sessionInfo)
+ if err != nil {
+ return nil, fmt.Errorf("could not receive session key: %w", err)
+ }
+
+ // In case session token is missing, the line above returns the default key.
+ // If it isn't owner key, replication attempts will fail, thus this check.
+ ownerObj := prm.Header.OwnerID()
+ if ownerObj.IsEmpty() {
+ return nil, errors.New("missing object owner")
+ }
+
+ if sToken == nil {
+ var ownerSession user.ID
+ user.IDFromKey(&ownerSession, key.PublicKey)
+
+ if !ownerObj.Equals(ownerSession) {
+ return nil, errors.New("session token is missing but object owner id is different from the default key")
+ }
+ } else {
+ if !ownerObj.Equals(sessionInfo.Owner) {
+ return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
+ }
+ }
+
+ if prm.SignRequestPrivateKey == nil {
+ prm.SignRequestPrivateKey = key
+ }
+
+ return &validatingTarget{
+ fmt: prm.Config.FormatValidator,
+ nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{
+ Key: key,
+ NextTargetInit: func() transformer.ObjectWriter { return objectwriter.New(prm) },
+ NetworkState: prm.Config.NetworkState,
+ MaxSize: maxPayloadSz,
+ WithoutHomomorphicHash: containerSDK.IsHomomorphicHashingDisabled(prm.Container),
+ SessionToken: sToken,
+ }),
+ }, nil
+}
+
+func preparePrm(prm *objectwriter.Params) error {
+ var err error
+
+ // get latest network map
+ nm, err := netmap.GetLatestNetworkMap(prm.Config.NetmapSource)
+ if err != nil {
+ //return fmt.Errorf("(%T) could not get latest network map: %w", p, err)
+ return fmt.Errorf("could not get latest network map: %w", err)
+ }
+
+ idCnr, ok := prm.Header.ContainerID()
+ if !ok {
+ return errors.New("missing container ID")
+ }
+
+ // get container to store the object
+ cnrInfo, err := prm.Config.ContainerSource.Get(idCnr)
+ if err != nil {
+ //return fmt.Errorf("(%T) could not get container by ID: %w", p, err)
+ return fmt.Errorf("could not get container by ID: %w", err)
+ }
+
+ prm.Container = cnrInfo.Value
+
+ // add common options
+ prm.TraverseOpts = append(prm.TraverseOpts,
+ // set processing container
+ placement.ForContainer(prm.Container),
+ )
+
+ if ech := prm.Header.ECHeader(); ech != nil {
+ prm.TraverseOpts = append(prm.TraverseOpts,
+ // set identifier of the processing object
+ placement.ForObject(ech.Parent()),
+ )
+ } else if id, ok := prm.Header.ID(); ok {
+ prm.TraverseOpts = append(prm.TraverseOpts,
+ // set identifier of the processing object
+ placement.ForObject(id),
+ )
+ }
+
+ // create placement builder from network map
+ builder := placement.NewNetworkMapBuilder(nm)
+
+ if prm.Common.LocalOnly() {
+ // restrict success count to 1 stored copy (to local storage)
+ prm.TraverseOpts = append(prm.TraverseOpts, placement.SuccessAfter(1))
+
+ // use local-only placement builder
+ builder = util.NewLocalPlacement(builder, prm.Config.NetmapKeys)
+ }
+
+ // set placement builder
+ prm.TraverseOpts = append(prm.TraverseOpts, placement.UseBuilder(builder))
+
+ return nil
+}
diff --git a/pkg/services/object/put/validation.go b/pkg/services/object/common/target/validation.go
similarity index 99%
rename from pkg/services/object/put/validation.go
rename to pkg/services/object/common/target/validation.go
index c2b078ef5..b29721d01 100644
--- a/pkg/services/object/put/validation.go
+++ b/pkg/services/object/common/target/validation.go
@@ -1,4 +1,4 @@
-package putsvc
+package target
import (
"bytes"
diff --git a/pkg/services/object/put/common.go b/pkg/services/object/common/writer/common.go
similarity index 65%
rename from pkg/services/object/put/common.go
rename to pkg/services/object/common/writer/common.go
index cbb7f5f33..6689557ee 100644
--- a/pkg/services/object/put/common.go
+++ b/pkg/services/object/common/writer/common.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
@@ -13,23 +13,23 @@ import (
"go.uber.org/zap"
)
-type nodeIterator struct {
- traversal
- cfg *cfg
+type NodeIterator struct {
+ Traversal
+ cfg *Config
}
-func (c *cfg) newNodeIterator(opts []placement.Option) *nodeIterator {
- return &nodeIterator{
- traversal: traversal{
- opts: opts,
- mExclude: make(map[string]*bool),
+func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator {
+ return &NodeIterator{
+ Traversal: Traversal{
+ Opts: opts,
+ Exclude: make(map[string]*bool),
},
cfg: c,
}
}
-func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context, nodeDesc) error) error {
- traverser, err := placement.NewTraverser(n.traversal.opts...)
+func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error {
+ traverser, err := placement.NewTraverser(n.Traversal.Opts...)
if err != nil {
return fmt.Errorf("could not create object placement traverser: %w", err)
}
@@ -56,10 +56,10 @@ func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context,
}
// perform additional container broadcast if needed
- if n.traversal.submitPrimaryPlacementFinish() {
- err := n.forEachNode(ctx, f)
+ if n.Traversal.submitPrimaryPlacementFinish() {
+ err := n.ForEachNode(ctx, f)
if err != nil {
- n.cfg.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
+ n.cfg.Logger.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
@@ -67,11 +67,11 @@ func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context,
return nil
}
-func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, nodeDesc) error, resErr *atomic.Value) bool {
+func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, NodeDescriptor) error, resErr *atomic.Value) bool {
var wg sync.WaitGroup
for _, addr := range addrs {
- if ok := n.mExclude[string(addr.PublicKey())]; ok != nil {
+ if ok := n.Exclude[string(addr.PublicKey())]; ok != nil {
if *ok {
traverser.SubmitSuccess()
}
@@ -86,10 +86,10 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
if err := workerPool.Submit(func() {
defer wg.Done()
- err := f(ctx, nodeDesc{local: isLocal, info: addr})
+ err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr})
if err != nil {
resErr.Store(err)
- svcutil.LogServiceError(n.cfg.log, "PUT", addr.Addresses(), err)
+ svcutil.LogServiceError(n.cfg.Logger, "PUT", addr.Addresses(), err)
return
}
@@ -97,7 +97,7 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
*item = true
}); err != nil {
wg.Done()
- svcutil.LogWorkerPoolError(n.cfg.log, "PUT", err)
+ svcutil.LogWorkerPoolError(n.cfg.Logger, "PUT", err)
return true
}
@@ -105,7 +105,7 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
// in subsequent container broadcast. Note that we don't
// process this node during broadcast if primary placement
// on it failed.
- n.traversal.submitProcessed(addr, item)
+ n.Traversal.submitProcessed(addr, item)
}
wg.Wait()
@@ -113,6 +113,6 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
return false
}
-func needAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool {
+func NeedAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool {
return len(obj.Children()) > 0 || (!localOnly && (obj.Type() == objectSDK.TypeTombstone || obj.Type() == objectSDK.TypeLock))
}
diff --git a/pkg/services/object/put/writer.go b/pkg/services/object/common/writer/dispatcher.go
similarity index 97%
rename from pkg/services/object/put/writer.go
rename to pkg/services/object/common/writer/dispatcher.go
index 53eee6006..bb9a54ce9 100644
--- a/pkg/services/object/put/writer.go
+++ b/pkg/services/object/common/writer/dispatcher.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/common/writer/distributed.go
similarity index 57%
rename from pkg/services/object/put/distributed.go
rename to pkg/services/object/common/writer/distributed.go
index 5176f7a54..f62934bed 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/common/writer/distributed.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
@@ -13,47 +13,47 @@ type preparedObjectTarget interface {
WriteObject(context.Context, *objectSDK.Object, object.ContentMeta) error
}
-type distributedTarget struct {
+type distributedWriter struct {
+ cfg *Config
+
placementOpts []placement.Option
obj *objectSDK.Object
objMeta object.ContentMeta
- *cfg
+ nodeTargetInitializer func(NodeDescriptor) preparedObjectTarget
- nodeTargetInitializer func(nodeDesc) preparedObjectTarget
-
- relay func(context.Context, nodeDesc) error
+ relay func(context.Context, NodeDescriptor) error
resetSuccessAfterOnBroadcast bool
}
-// parameters and state of container traversal.
-type traversal struct {
- opts []placement.Option
+// parameters and state of container Traversal.
+type Traversal struct {
+ Opts []placement.Option
// need of additional broadcast after the object is saved
- extraBroadcastEnabled bool
+ ExtraBroadcastEnabled bool
// container nodes which was processed during the primary object placement
- mExclude map[string]*bool
+ Exclude map[string]*bool
- resetSuccessAfterOnBroadcast bool
+ ResetSuccessAfterOnBroadcast bool
}
// updates traversal parameters after the primary placement finish and
// returns true if additional container broadcast is needed.
-func (x *traversal) submitPrimaryPlacementFinish() bool {
- if x.extraBroadcastEnabled {
+func (x *Traversal) submitPrimaryPlacementFinish() bool {
+ if x.ExtraBroadcastEnabled {
// do not track success during container broadcast (best-effort)
- x.opts = append(x.opts, placement.WithoutSuccessTracking())
+ x.Opts = append(x.Opts, placement.WithoutSuccessTracking())
- if x.resetSuccessAfterOnBroadcast {
- x.opts = append(x.opts, placement.ResetSuccessAfter())
+ if x.ResetSuccessAfterOnBroadcast {
+ x.Opts = append(x.Opts, placement.ResetSuccessAfter())
}
// avoid 2nd broadcast
- x.extraBroadcastEnabled = false
+ x.ExtraBroadcastEnabled = false
return true
}
@@ -62,22 +62,22 @@ func (x *traversal) submitPrimaryPlacementFinish() bool {
}
// marks the container node as processed during the primary object placement.
-func (x *traversal) submitProcessed(n placement.Node, item *bool) {
- if x.extraBroadcastEnabled {
+func (x *Traversal) submitProcessed(n placement.Node, item *bool) {
+ if x.ExtraBroadcastEnabled {
key := string(n.PublicKey())
- if x.mExclude == nil {
- x.mExclude = make(map[string]*bool, 1)
+ if x.Exclude == nil {
+ x.Exclude = make(map[string]*bool, 1)
}
- x.mExclude[key] = item
+ x.Exclude[key] = item
}
}
-type nodeDesc struct {
- local bool
+type NodeDescriptor struct {
+ Local bool
- info placement.Node
+ Info placement.Node
}
// errIncompletePut is returned if processing on a container fails.
@@ -96,19 +96,19 @@ func (x errIncompletePut) Error() string {
}
// WriteObject implements the transformer.ObjectWriter interface.
-func (t *distributedTarget) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
t.obj = obj
var err error
- if t.objMeta, err = t.fmtValidator.ValidateContent(t.obj); err != nil {
+ if t.objMeta, err = t.cfg.FormatValidator.ValidateContent(t.obj); err != nil {
return fmt.Errorf("(%T) could not validate payload content: %w", t, err)
}
return t.iteratePlacement(ctx)
}
-func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error {
- if !node.local && t.relay != nil {
+func (t *distributedWriter) sendObject(ctx context.Context, node NodeDescriptor) error {
+ if !node.Local && t.relay != nil {
return t.relay(ctx, node)
}
@@ -121,11 +121,11 @@ func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error
return nil
}
-func (t *distributedTarget) iteratePlacement(ctx context.Context) error {
+func (t *distributedWriter) iteratePlacement(ctx context.Context) error {
id, _ := t.obj.ID()
- iter := t.cfg.newNodeIterator(append(t.placementOpts, placement.ForObject(id)))
- iter.extraBroadcastEnabled = needAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */)
- iter.resetSuccessAfterOnBroadcast = t.resetSuccessAfterOnBroadcast
- return iter.forEachNode(ctx, t.sendObject)
+ iter := t.cfg.NewNodeIterator(append(t.placementOpts, placement.ForObject(id)))
+ iter.ExtraBroadcastEnabled = NeedAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */)
+ iter.ResetSuccessAfterOnBroadcast = t.resetSuccessAfterOnBroadcast
+ return iter.ForEachNode(ctx, t.sendObject)
}
diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/common/writer/ec.go
similarity index 69%
rename from pkg/services/object/put/ec.go
rename to pkg/services/object/common/writer/ec.go
index 9980f6d61..fb0a8e4e5 100644
--- a/pkg/services/object/put/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
@@ -23,23 +23,23 @@ import (
"golang.org/x/sync/errgroup"
)
-var _ transformer.ObjectWriter = (*ecWriter)(nil)
+var _ transformer.ObjectWriter = (*ECWriter)(nil)
var errUnsupportedECObject = errors.New("object is not supported for erasure coding")
-type ecWriter struct {
- cfg *cfg
- placementOpts []placement.Option
- container containerSDK.Container
- key *ecdsa.PrivateKey
- commonPrm *svcutil.CommonPrm
- relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
+type ECWriter struct {
+ Config *Config
+ PlacementOpts []placement.Option
+ Container containerSDK.Container
+ Key *ecdsa.PrivateKey
+ CommonPrm *svcutil.CommonPrm
+ Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
- objMeta object.ContentMeta
- objMetaValid bool
+ ObjectMeta object.ContentMeta
+ ObjectMetaValid bool
}
-func (e *ecWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
relayed, err := e.relayIfNotContainerNode(ctx, obj)
if err != nil {
return err
@@ -53,11 +53,11 @@ func (e *ecWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error
return errUnsupportedECObject
}
- if !e.objMetaValid {
- if e.objMeta, err = e.cfg.fmtValidator.ValidateContent(obj); err != nil {
+ if !e.ObjectMetaValid {
+ if e.ObjectMeta, err = e.Config.FormatValidator.ValidateContent(obj); err != nil {
return fmt.Errorf("(%T) could not validate payload content: %w", e, err)
}
- e.objMetaValid = true
+ e.ObjectMetaValid = true
}
if obj.ECHeader() != nil {
@@ -66,8 +66,8 @@ func (e *ecWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error
return e.writeRawObject(ctx, obj)
}
-func (e *ecWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, error) {
- if e.relay == nil {
+func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, error) {
+ if e.Relay == nil {
return false, nil
}
currentNodeIsContainerNode, err := e.currentNodeIsContainerNode()
@@ -90,8 +90,8 @@ func (e *ecWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.O
return true, nil
}
-func (e *ecWriter) currentNodeIsContainerNode() (bool, error) {
- t, err := placement.NewTraverser(e.placementOpts...)
+func (e *ECWriter) currentNodeIsContainerNode() (bool, error) {
+ t, err := placement.NewTraverser(e.PlacementOpts...)
if err != nil {
return false, err
}
@@ -101,7 +101,7 @@ func (e *ecWriter) currentNodeIsContainerNode() (bool, error) {
break
}
for _, node := range nodes {
- if e.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
+ if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) {
return true, nil
}
}
@@ -109,8 +109,8 @@ func (e *ecWriter) currentNodeIsContainerNode() (bool, error) {
return false, nil
}
-func (e *ecWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error {
- t, err := placement.NewTraverser(append(e.placementOpts, placement.ForObject(objID))...)
+func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error {
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
@@ -126,18 +126,18 @@ func (e *ecWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
var info client.NodeInfo
client.NodeInfoFromNetmapElement(&info, node)
- c, err := e.cfg.clientConstructor.Get(info)
+ c, err := e.Config.ClientConstructor.Get(info)
if err != nil {
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
completed := make(chan interface{})
- if poolErr := e.cfg.remotePool.Submit(func() {
+ if poolErr := e.Config.RemotePool.Submit(func() {
defer close(completed)
- err = e.relay(ctx, info, c)
+ err = e.Relay(ctx, info, c)
}); poolErr != nil {
close(completed)
- svcutil.LogWorkerPoolError(e.cfg.log, "PUT", poolErr)
+ svcutil.LogWorkerPoolError(e.Config.Logger, "PUT", poolErr)
return poolErr
}
<-completed
@@ -145,7 +145,7 @@ func (e *ecWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
if err == nil {
return nil
}
- e.cfg.log.Logger.Warn(logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
+ e.Config.Logger.Logger.Warn(logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
lastErr = err
}
}
@@ -157,12 +157,12 @@ func (e *ecWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
}
}
-func (e *ecWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error {
- if e.commonPrm.LocalOnly() {
+func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error {
+ if e.CommonPrm.LocalOnly() {
return e.writePartLocal(ctx, obj)
}
- t, err := placement.NewTraverser(append(e.placementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
if err != nil {
return err
}
@@ -187,18 +187,18 @@ func (e *ecWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error
return nil
}
-func (e *ecWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) error {
+func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) error {
// now only single EC policy is supported
- c, err := erasurecode.NewConstructor(policy.ECDataCount(e.container.PlacementPolicy()), policy.ECParityCount(e.container.PlacementPolicy()))
+ c, err := erasurecode.NewConstructor(policy.ECDataCount(e.Container.PlacementPolicy()), policy.ECParityCount(e.Container.PlacementPolicy()))
if err != nil {
return err
}
- parts, err := c.Split(obj, e.key)
+ parts, err := c.Split(obj, e.Key)
if err != nil {
return err
}
objID, _ := obj.ID()
- t, err := placement.NewTraverser(append(e.placementOpts, placement.ForObject(objID))...)
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
@@ -230,7 +230,7 @@ func (e *ecWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
return nil
}
-func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx int, nodes []placement.Node, visited []atomic.Bool) error {
+func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx int, nodes []placement.Node, visited []atomic.Bool) error {
select {
case <-ctx.Done():
return ctx.Err()
@@ -243,7 +243,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.cfg.log.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
@@ -267,7 +267,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.cfg.log.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -291,7 +291,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.cfg.log.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -300,22 +300,22 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
return fmt.Errorf("failed to save EC chunk %s to any node", object.AddressOf(obj))
}
-func (e *ecWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
- if e.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
+func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
+ if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) {
return e.writePartLocal(ctx, obj)
}
return e.writePartRemote(ctx, obj, node)
}
-func (e *ecWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
+func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
var err error
- localTarget := localTarget{
- storage: e.cfg.localStore,
+ localTarget := LocalTarget{
+ Storage: e.Config.LocalStore,
}
completed := make(chan interface{})
- if poolErr := e.cfg.localPool.Submit(func() {
+ if poolErr := e.Config.LocalPool.Submit(func() {
defer close(completed)
- err = localTarget.WriteObject(ctx, obj, e.objMeta)
+ err = localTarget.WriteObject(ctx, obj, e.ObjectMeta)
}); poolErr != nil {
close(completed)
return poolErr
@@ -324,22 +324,22 @@ func (e *ecWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) er
return err
}
-func (e *ecWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
+func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
var clientNodeInfo client.NodeInfo
client.NodeInfoFromNetmapElement(&clientNodeInfo, node)
- remoteTaget := remoteTarget{
- privateKey: e.key,
- clientConstructor: e.cfg.clientConstructor,
- commonPrm: e.commonPrm,
+ remoteTaget := remoteWriter{
+ privateKey: e.Key,
+ clientConstructor: e.Config.ClientConstructor,
+ commonPrm: e.CommonPrm,
nodeInfo: clientNodeInfo,
}
var err error
completed := make(chan interface{})
- if poolErr := e.cfg.remotePool.Submit(func() {
+ if poolErr := e.Config.RemotePool.Submit(func() {
defer close(completed)
- err = remoteTaget.WriteObject(ctx, obj, e.objMeta)
+ err = remoteTaget.WriteObject(ctx, obj, e.ObjectMeta)
}); poolErr != nil {
close(completed)
return poolErr
diff --git a/pkg/services/object/put/local.go b/pkg/services/object/common/writer/local.go
similarity index 81%
rename from pkg/services/object/put/local.go
rename to pkg/services/object/common/writer/local.go
index 54649adc7..02fd25b9e 100644
--- a/pkg/services/object/put/local.go
+++ b/pkg/services/object/common/writer/local.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
@@ -24,19 +24,19 @@ type ObjectStorage interface {
IsLocked(context.Context, oid.Address) (bool, error)
}
-type localTarget struct {
- storage ObjectStorage
+type LocalTarget struct {
+ Storage ObjectStorage
}
-func (t localTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
+func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
switch meta.Type() {
case objectSDK.TypeTombstone:
- err := t.storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
+ err := t.Storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
if err != nil {
return fmt.Errorf("could not delete objects from tombstone locally: %w", err)
}
case objectSDK.TypeLock:
- err := t.storage.Lock(ctx, objectCore.AddressOf(obj), meta.Objects())
+ err := t.Storage.Lock(ctx, objectCore.AddressOf(obj), meta.Objects())
if err != nil {
return fmt.Errorf("could not lock object from lock objects locally: %w", err)
}
@@ -44,7 +44,7 @@ func (t localTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met
// objects that do not change meta storage
}
- if err := t.storage.Put(ctx, obj); err != nil {
+ if err := t.Storage.Put(ctx, obj); err != nil {
return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
}
return nil
diff --git a/pkg/services/object/put/remote.go b/pkg/services/object/common/writer/remote.go
similarity index 92%
rename from pkg/services/object/put/remote.go
rename to pkg/services/object/common/writer/remote.go
index ee8d64e7a..697613ff7 100644
--- a/pkg/services/object/put/remote.go
+++ b/pkg/services/object/common/writer/remote.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
@@ -16,7 +16,7 @@ import (
"google.golang.org/grpc/status"
)
-type remoteTarget struct {
+type remoteWriter struct {
privateKey *ecdsa.PrivateKey
commonPrm *util.CommonPrm
@@ -41,7 +41,7 @@ type RemotePutPrm struct {
obj *objectSDK.Object
}
-func (t *remoteTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, _ objectcore.ContentMeta) error {
+func (t *remoteWriter) WriteObject(ctx context.Context, obj *objectSDK.Object, _ objectcore.ContentMeta) error {
c, err := t.clientConstructor.Get(t.nodeInfo)
if err != nil {
return fmt.Errorf("(%T) could not create SDK client %s: %w", t, t.nodeInfo, err)
@@ -64,7 +64,7 @@ func (t *remoteTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, _
return t.putStream(ctx, prm)
}
-func (t *remoteTarget) putStream(ctx context.Context, prm internalclient.PutObjectPrm) error {
+func (t *remoteWriter) putStream(ctx context.Context, prm internalclient.PutObjectPrm) error {
_, err := internalclient.PutObject(ctx, prm)
if err != nil {
return fmt.Errorf("(%T) could not put object to %s: %w", t, t.nodeInfo.AddressGroup(), err)
@@ -72,7 +72,7 @@ func (t *remoteTarget) putStream(ctx context.Context, prm internalclient.PutObje
return nil
}
-func (t *remoteTarget) putSingle(ctx context.Context, prm internalclient.PutObjectPrm) error {
+func (t *remoteWriter) putSingle(ctx context.Context, prm internalclient.PutObjectPrm) error {
_, err := internalclient.PutObjectSingle(ctx, prm)
if err != nil {
return fmt.Errorf("(%T) could not put single object to %s: %w", t, t.nodeInfo.AddressGroup(), err)
@@ -113,7 +113,7 @@ func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error {
return err
}
- t := &remoteTarget{
+ t := &remoteWriter{
privateKey: key,
clientConstructor: s.clientConstructor,
}
diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go
new file mode 100644
index 000000000..3d50da988
--- /dev/null
+++ b/pkg/services/object/common/writer/writer.go
@@ -0,0 +1,183 @@
+package writer
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
+ objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+)
+
+type MaxSizeSource interface {
+ // MaxObjectSize returns maximum payload size
+ // of physically stored object in system.
+ //
+ // Must return 0 if value can not be obtained.
+ MaxObjectSize() uint64
+}
+
+type ClientConstructor interface {
+ Get(client.NodeInfo) (client.MultiAddressClient, error)
+}
+
+type InnerRing interface {
+ InnerRingKeys() ([][]byte, error)
+}
+
+type FormatValidatorConfig interface {
+ VerifySessionTokenIssuer() bool
+}
+
+// Config represents a set of static parameters that are established during
+// the initialization phase of all services.
+type Config struct {
+ KeyStorage *objutil.KeyStorage
+
+ MaxSizeSrc MaxSizeSource
+
+ LocalStore ObjectStorage
+
+ ContainerSource container.Source
+
+ NetmapSource netmap.Source
+
+ RemotePool, LocalPool util.WorkerPool
+
+ NetmapKeys netmap.AnnouncedKeys
+
+ FormatValidator *object.FormatValidator
+
+ NetworkState netmap.State
+
+ ClientConstructor ClientConstructor
+
+ Logger *logger.Logger
+
+ VerifySessionTokenIssuer bool
+}
+
+type Option func(*Config)
+
+func WithWorkerPools(remote, local util.WorkerPool) Option {
+ return func(c *Config) {
+ c.RemotePool, c.LocalPool = remote, local
+ }
+}
+
+func WithLogger(l *logger.Logger) Option {
+ return func(c *Config) {
+ c.Logger = l
+ }
+}
+
+func WithVerifySessionTokenIssuer(v bool) Option {
+ return func(c *Config) {
+ c.VerifySessionTokenIssuer = v
+ }
+}
+
+func (c *Config) getWorkerPool(pub []byte) (util.WorkerPool, bool) {
+ if c.NetmapKeys.IsLocalKey(pub) {
+ return c.LocalPool, true
+ }
+ return c.RemotePool, false
+}
+
+type Params struct {
+ Config *Config
+
+ Common *objutil.CommonPrm
+
+ Header *objectSDK.Object
+
+ Container containerSDK.Container
+
+ TraverseOpts []placement.Option
+
+ Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
+
+ SignRequestPrivateKey *ecdsa.PrivateKey
+}
+
+func New(prm *Params) transformer.ObjectWriter {
+ if container.IsECContainer(prm.Container) && object.IsECSupported(prm.Header) {
+ return newECWriter(prm)
+ }
+ return newDefaultObjectWriter(prm, false)
+}
+
+func newDefaultObjectWriter(prm *Params, forECPlacement bool) transformer.ObjectWriter {
+ var relay func(context.Context, NodeDescriptor) error
+ if prm.Relay != nil {
+ relay = func(ctx context.Context, node NodeDescriptor) error {
+ var info client.NodeInfo
+
+ client.NodeInfoFromNetmapElement(&info, node.Info)
+
+ c, err := prm.Config.ClientConstructor.Get(info)
+ if err != nil {
+ return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
+ }
+
+ return prm.Relay(ctx, info, c)
+ }
+ }
+
+ var resetSuccessAfterOnBroadcast bool
+ traverseOpts := prm.TraverseOpts
+ if forECPlacement && !prm.Common.LocalOnly() {
+ // save non-regular and linking object to EC container.
+ // EC 2.1 -> REP 2, EC 2.2 -> REP 3 etc.
+ traverseOpts = append(traverseOpts, placement.SuccessAfter(uint32(policy.ECParityCount(prm.Container.PlacementPolicy())+1)))
+ resetSuccessAfterOnBroadcast = true
+ }
+
+ return &distributedWriter{
+ cfg: prm.Config,
+ placementOpts: traverseOpts,
+ resetSuccessAfterOnBroadcast: resetSuccessAfterOnBroadcast,
+ nodeTargetInitializer: func(node NodeDescriptor) preparedObjectTarget {
+ if node.Local {
+ return LocalTarget{
+ Storage: prm.Config.LocalStore,
+ }
+ }
+
+ rt := &remoteWriter{
+ privateKey: prm.SignRequestPrivateKey,
+ commonPrm: prm.Common,
+ clientConstructor: prm.Config.ClientConstructor,
+ }
+
+ client.NodeInfoFromNetmapElement(&rt.nodeInfo, node.Info)
+
+ return rt
+ },
+ relay: relay,
+ }
+}
+
+func newECWriter(prm *Params) transformer.ObjectWriter {
+ return &objectWriterDispatcher{
+ ecWriter: &ECWriter{
+ Config: prm.Config,
+ PlacementOpts: append(prm.TraverseOpts, placement.WithCopyNumbers(nil)), // copies number ignored for EC
+ Container: prm.Container,
+ Key: prm.SignRequestPrivateKey,
+ CommonPrm: prm.Common,
+ Relay: prm.Relay,
+ },
+ repWriter: newDefaultObjectWriter(prm, true),
+ }
+}
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
index c4ab15abf..f1082dfff 100644
--- a/pkg/services/object/patch/service.go
+++ b/pkg/services/object/patch/service.go
@@ -2,43 +2,40 @@ package patchsvc
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
)
// Service implements Put operation of Object service v2.
type Service struct {
- keyStorage *util.KeyStorage
+ *objectwriter.Config
getSvc *getsvc.Service
-
- putSvc *putsvc.Service
}
// NewService constructs Service instance from provided options.
-func NewService(ks *util.KeyStorage, getSvc *getsvc.Service, putSvc *putsvc.Service) *Service {
+//
+// Patch service can use the same objectwriter.Config initialized by the Put service.
+func NewService(cfg *objectwriter.Config,
+ getSvc *getsvc.Service,
+) *Service {
return &Service{
- keyStorage: ks,
+ Config: cfg,
getSvc: getSvc,
-
- putSvc: putSvc,
}
}
// Put calls internal service and returns v2 object streamer.
func (s *Service) Patch() (object.PatchObjectStream, error) {
- nodeKey, err := s.keyStorage.GetKey(nil)
+ nodeKey, err := s.Config.KeyStorage.GetKey(nil)
if err != nil {
return nil, err
}
return &Streamer{
- getSvc: s.getSvc,
-
- putSvc: s.putSvc,
-
+ Config: s.Config,
+ getSvc: s.getSvc,
localNodeKey: nodeKey,
}, nil
}
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index 84363530e..85c28cda0 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -9,8 +9,9 @@ import (
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -21,6 +22,8 @@ import (
// Streamer for the patch handler is a pipeline that merges two incoming streams of patches
// and original object payload chunks. The merged result is fed to Put stream target.
type Streamer struct {
+ *objectwriter.Config
+
// Patcher must be initialized at first Streamer.Send call.
patcher patcher.PatchApplier
@@ -28,8 +31,6 @@ type Streamer struct {
getSvc *getsvc.Service
- putSvc *putsvc.Service
-
localNodeKey *ecdsa.PrivateKey
}
@@ -78,11 +79,6 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
localNodeKey: s.localNodeKey,
}
- putstm, err := s.putSvc.Put()
- if err != nil {
- return err
- }
-
hdr := hdrWithSig.GetHeader()
oV2 := new(objectV2.Object)
hV2 := new(objectV2.Header)
@@ -97,14 +93,14 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
}
oV2.GetHeader().SetOwnerID(ownerID)
- prm, err := s.putInitPrm(req, oV2)
+ target, err := target.New(&objectwriter.Params{
+ Config: s.Config,
+ Common: commonPrm,
+ Header: objectSDK.NewFromV2(oV2),
+ SignRequestPrivateKey: s.localNodeKey,
+ })
if err != nil {
- return err
- }
-
- err = putstm.Init(ctx, prm)
- if err != nil {
- return err
+ return fmt.Errorf("target creation: %w", err)
}
patcherPrm := patcher.Params{
@@ -112,7 +108,7 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
RangeProvider: rangeProvider,
- ObjectWriter: putstm.Target(),
+ ObjectWriter: target,
}
s.patcher = patcher.New(patcherPrm)
diff --git a/pkg/services/object/patch/util.go b/pkg/services/object/patch/util.go
index 1218d6694..4f3c3ef17 100644
--- a/pkg/services/object/patch/util.go
+++ b/pkg/services/object/patch/util.go
@@ -6,31 +6,12 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
-// putInitPrm initializes put paramerer for Put stream.
-func (s *Streamer) putInitPrm(req *objectV2.PatchRequest, obj *objectV2.Object) (*putsvc.PutInitPrm, error) {
- commonPrm, err := util.CommonPrmFromV2(req)
- if err != nil {
- return nil, err
- }
-
- prm := new(putsvc.PutInitPrm)
- prm.WithObject(objectSDK.NewFromV2(obj)).
- WithCommonPrm(commonPrm).
- WithPrivateKey(s.localNodeKey)
-
- return prm, nil
-}
-
func newOwnerID(vh *session.RequestVerificationHeader) (*refs.OwnerID, error) {
for vh.GetOrigin() != nil {
vh = vh.GetOrigin()
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index a93873738..8cf4f0d62 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -1,132 +1,66 @@
package putsvc
import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
-type MaxSizeSource interface {
- // MaxObjectSize returns maximum payload size
- // of physically stored object in system.
- //
- // Must return 0 if value can not be obtained.
- MaxObjectSize() uint64
-}
-
type Service struct {
- *cfg
-}
-
-type Option func(*cfg)
-
-type ClientConstructor interface {
- Get(client.NodeInfo) (client.MultiAddressClient, error)
-}
-
-type InnerRing interface {
- InnerRingKeys() ([][]byte, error)
-}
-
-type FormatValidatorConfig interface {
- VerifySessionTokenIssuer() bool
-}
-
-type cfg struct {
- keyStorage *objutil.KeyStorage
-
- maxSizeSrc MaxSizeSource
-
- localStore ObjectStorage
-
- cnrSrc container.Source
-
- netMapSrc netmap.Source
-
- remotePool, localPool util.WorkerPool
-
- netmapKeys netmap.AnnouncedKeys
-
- fmtValidator *object.FormatValidator
-
- networkState netmap.State
-
- clientConstructor ClientConstructor
-
- log *logger.Logger
-
- verifySessionTokenIssuer bool
+ *objectwriter.Config
}
func NewService(ks *objutil.KeyStorage,
- cc ClientConstructor,
- ms MaxSizeSource,
- os ObjectStorage,
+ cc objectwriter.ClientConstructor,
+ ms objectwriter.MaxSizeSource,
+ os objectwriter.ObjectStorage,
cs container.Source,
ns netmap.Source,
nk netmap.AnnouncedKeys,
nst netmap.State,
- ir InnerRing,
- opts ...Option,
+ ir objectwriter.InnerRing,
+ opts ...objectwriter.Option,
) *Service {
- c := &cfg{
- remotePool: util.NewPseudoWorkerPool(),
- localPool: util.NewPseudoWorkerPool(),
- log: &logger.Logger{Logger: zap.L()},
- keyStorage: ks,
- clientConstructor: cc,
- maxSizeSrc: ms,
- localStore: os,
- cnrSrc: cs,
- netMapSrc: ns,
- netmapKeys: nk,
- networkState: nst,
+ c := &objectwriter.Config{
+ RemotePool: util.NewPseudoWorkerPool(),
+ LocalPool: util.NewPseudoWorkerPool(),
+ Logger: &logger.Logger{Logger: zap.L()},
+ KeyStorage: ks,
+ ClientConstructor: cc,
+ MaxSizeSrc: ms,
+ LocalStore: os,
+ ContainerSource: cs,
+ NetmapSource: ns,
+ NetmapKeys: nk,
+ NetworkState: nst,
}
for i := range opts {
opts[i](c)
}
- c.fmtValidator = object.NewFormatValidator(
+ c.FormatValidator = object.NewFormatValidator(
object.WithLockSource(os),
object.WithNetState(nst),
object.WithInnerRing(ir),
object.WithNetmapSource(ns),
object.WithContainersSource(cs),
- object.WithVerifySessionTokenIssuer(c.verifySessionTokenIssuer),
- object.WithLogger(c.log),
+ object.WithVerifySessionTokenIssuer(c.VerifySessionTokenIssuer),
+ object.WithLogger(c.Logger),
)
return &Service{
- cfg: c,
+ Config: c,
}
}
func (p *Service) Put() (*Streamer, error) {
return &Streamer{
- cfg: p.cfg,
+ Config: p.Config,
}, nil
}
-
-func WithWorkerPools(remote, local util.WorkerPool) Option {
- return func(c *cfg) {
- c.remotePool, c.localPool = remote, local
- }
-}
-
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- }
-}
-
-func WithVerifySessionTokenIssuer(v bool) Option {
- return func(c *cfg) {
- c.verifySessionTokenIssuer = v
- }
-}
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 3cc8518f5..9b4163268 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -21,6 +21,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
@@ -97,12 +99,12 @@ func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object)
func (s *Service) validarePutSingleSize(obj *objectSDK.Object) error {
if uint64(len(obj.Payload())) != obj.PayloadSize() {
- return ErrWrongPayloadSize
+ return target.ErrWrongPayloadSize
}
- maxAllowedSize := s.maxSizeSrc.MaxObjectSize()
+ maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize()
if obj.PayloadSize() > maxAllowedSize {
- return ErrExceedingMaxSize
+ return target.ErrExceedingMaxSize
}
return nil
@@ -137,11 +139,11 @@ func (s *Service) validatePutSingleChecksum(obj *objectSDK.Object) error {
}
func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
- if err := s.fmtValidator.Validate(ctx, obj, false); err != nil {
+ if err := s.FormatValidator.Validate(ctx, obj, false); err != nil {
return object.ContentMeta{}, fmt.Errorf("coud not validate object format: %w", err)
}
- meta, err := s.fmtValidator.ValidateContent(obj)
+ meta, err := s.FormatValidator.ValidateContent(obj)
if err != nil {
return object.ContentMeta{}, fmt.Errorf("could not validate payload content: %w", err)
}
@@ -164,17 +166,17 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
}
func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
- iter := s.cfg.newNodeIterator(placement.placementOptions)
- iter.extraBroadcastEnabled = needAdditionalBroadcast(obj, localOnly)
- iter.resetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
+ iter := s.Config.NewNodeIterator(placement.placementOptions)
+ iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
+ iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.keyStorage,
+ keyStorage: s.Config.KeyStorage,
signer: &sync.Once{},
}
- return iter.forEachNode(ctx, func(ctx context.Context, nd nodeDesc) error {
+ return iter.ForEachNode(ctx, func(ctx context.Context, nd objectwriter.NodeDescriptor) error {
return s.saveToPlacementNode(ctx, &nd, obj, signer, meta)
})
}
@@ -184,25 +186,25 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace
if err != nil {
return err
}
- key, err := s.cfg.keyStorage.GetKey(nil)
+ key, err := s.Config.KeyStorage.GetKey(nil)
if err != nil {
return err
}
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.keyStorage,
+ keyStorage: s.Config.KeyStorage,
signer: &sync.Once{},
}
- w := ecWriter{
- cfg: s.cfg,
- placementOpts: placement.placementOptions,
- objMeta: meta,
- objMetaValid: true,
- commonPrm: commonPrm,
- container: placement.container,
- key: key,
- relay: func(ctx context.Context, ni client.NodeInfo, mac client.MultiAddressClient) error {
+ w := objectwriter.ECWriter{
+ Config: s.Config,
+ PlacementOpts: placement.placementOptions,
+ ObjectMeta: meta,
+ ObjectMetaValid: true,
+ CommonPrm: commonPrm,
+ Container: placement.container,
+ Key: key,
+ Relay: func(ctx context.Context, ni client.NodeInfo, mac client.MultiAddressClient) error {
return s.redirectPutSingleRequest(ctx, signer, obj, ni, mac)
},
}
@@ -223,7 +225,7 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
if !ok {
return result, errors.New("missing container ID")
}
- cnrInfo, err := s.cnrSrc.Get(cnrID)
+ cnrInfo, err := s.Config.ContainerSource.Get(cnrID)
if err != nil {
return result, fmt.Errorf("could not get container by ID: %w", err)
}
@@ -247,31 +249,31 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
}
result.placementOptions = append(result.placementOptions, placement.ForObject(objID))
- latestNetmap, err := netmap.GetLatestNetworkMap(s.netMapSrc)
+ latestNetmap, err := netmap.GetLatestNetworkMap(s.Config.NetmapSource)
if err != nil {
return result, fmt.Errorf("could not get latest network map: %w", err)
}
builder := placement.NewNetworkMapBuilder(latestNetmap)
if localOnly {
result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1))
- builder = svcutil.NewLocalPlacement(builder, s.netmapKeys)
+ builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys)
}
result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder))
return result, nil
}
-func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *nodeDesc, obj *objectSDK.Object,
+func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwriter.NodeDescriptor, obj *objectSDK.Object,
signer *putSingleRequestSigner, meta object.ContentMeta,
) error {
- if nodeDesc.local {
+ if nodeDesc.Local {
return s.saveLocal(ctx, obj, meta)
}
var info client.NodeInfo
- client.NodeInfoFromNetmapElement(&info, nodeDesc.info)
+ client.NodeInfoFromNetmapElement(&info, nodeDesc.Info)
- c, err := s.clientConstructor.Get(info)
+ c, err := s.Config.ClientConstructor.Get(info)
if err != nil {
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
@@ -280,8 +282,8 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *nodeDesc, o
}
func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta) error {
- localTarget := &localTarget{
- storage: s.localStore,
+ localTarget := &objectwriter.LocalTarget{
+ Storage: s.Config.LocalStore,
}
return localTarget.WriteObject(ctx, obj, meta)
}
@@ -314,7 +316,7 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
if err != nil {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
- s.log.Warn(logs.PutSingleRedirectFailure,
+ s.Config.Logger.Warn(logs.PutSingleRedirectFailure,
zap.Error(err),
zap.Stringer("address", addr),
zap.Stringer("object_id", objID),
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 6b396ec96..f3803d433 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -2,33 +2,21 @@ package putsvc
import (
"context"
- "crypto/ecdsa"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- pkgutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
type Streamer struct {
- *cfg
-
- privateKey *ecdsa.PrivateKey
+ *objectwriter.Config
target transformer.ChunkedObjectWriter
relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
-
- maxPayloadSz uint64 // network config
}
var errNotInit = errors.New("stream not initialized")
@@ -36,8 +24,23 @@ var errNotInit = errors.New("stream not initialized")
var errInitRecall = errors.New("init recall")
func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
+ if p.target != nil {
+ return errInitRecall
+ }
+
// initialize destination target
- if err := p.initTarget(prm); err != nil {
+ prmTarget := &objectwriter.Params{
+ Config: p.Config,
+ Common: prm.common,
+ Header: prm.hdr,
+ Container: prm.cnr,
+ TraverseOpts: prm.traverseOpts,
+ Relay: p.relay,
+ }
+
+ var err error
+ p.target, err = target.New(prmTarget)
+ if err != nil {
return fmt.Errorf("(%T) could not initialize object target: %w", p, err)
}
@@ -47,253 +50,6 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
return nil
}
-// Target accesses underlying target chunked object writer.
-func (p *Streamer) Target() transformer.ChunkedObjectWriter {
- return p.target
-}
-
-// MaxObjectSize returns maximum payload size for the streaming session.
-//
-// Must be called after the successful Init.
-func (p *Streamer) MaxObjectSize() uint64 {
- return p.maxPayloadSz
-}
-
-func (p *Streamer) initTarget(prm *PutInitPrm) error {
- // prevent re-calling
- if p.target != nil {
- return errInitRecall
- }
-
- // prepare needed put parameters
- if err := p.preparePrm(prm); err != nil {
- return fmt.Errorf("(%T) could not prepare put parameters: %w", p, err)
- }
-
- p.maxPayloadSz = p.maxSizeSrc.MaxObjectSize()
- if p.maxPayloadSz == 0 {
- return fmt.Errorf("(%T) could not obtain max object size parameter", p)
- }
-
- if prm.hdr.Signature() != nil {
- return p.initUntrustedTarget(prm)
- }
- return p.initTrustedTarget(prm)
-}
-
-func (p *Streamer) initUntrustedTarget(prm *PutInitPrm) error {
- p.relay = prm.relay
-
- if prm.privateKey != nil {
- p.privateKey = prm.privateKey
- } else {
- nodeKey, err := p.cfg.keyStorage.GetKey(nil)
- if err != nil {
- return err
- }
- p.privateKey = nodeKey
- }
-
- // prepare untrusted-Put object target
- p.target = &validatingPreparedTarget{
- nextTarget: newInMemoryObjectBuilder(p.newObjectWriter(prm)),
- fmt: p.fmtValidator,
-
- maxPayloadSz: p.maxPayloadSz,
- }
-
- return nil
-}
-
-func (p *Streamer) initTrustedTarget(prm *PutInitPrm) error {
- sToken := prm.common.SessionToken()
-
- // prepare trusted-Put object target
-
- // get private token from local storage
- var sessionInfo *util.SessionInfo
-
- if sToken != nil {
- sessionInfo = &util.SessionInfo{
- ID: sToken.ID(),
- Owner: sToken.Issuer(),
- }
- }
-
- key, err := p.keyStorage.GetKey(sessionInfo)
- if err != nil {
- return fmt.Errorf("(%T) could not receive session key: %w", p, err)
- }
-
- // In case session token is missing, the line above returns the default key.
- // If it isn't owner key, replication attempts will fail, thus this check.
- ownerObj := prm.hdr.OwnerID()
- if ownerObj.IsEmpty() {
- return errors.New("missing object owner")
- }
-
- if sToken == nil {
- var ownerSession user.ID
- user.IDFromKey(&ownerSession, key.PublicKey)
-
- if !ownerObj.Equals(ownerSession) {
- return fmt.Errorf("(%T) session token is missing but object owner id is different from the default key", p)
- }
- } else {
- if !ownerObj.Equals(sessionInfo.Owner) {
- return fmt.Errorf("(%T) different token issuer and object owner identifiers %s/%s", p, sessionInfo.Owner, ownerObj)
- }
- }
-
- if prm.privateKey != nil {
- p.privateKey = prm.privateKey
- } else {
- p.privateKey = key
- }
- p.target = &validatingTarget{
- fmt: p.fmtValidator,
- nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{
- Key: key,
- NextTargetInit: func() transformer.ObjectWriter { return p.newObjectWriter(prm) },
- NetworkState: p.networkState,
- MaxSize: p.maxPayloadSz,
- WithoutHomomorphicHash: containerSDK.IsHomomorphicHashingDisabled(prm.cnr),
- SessionToken: sToken,
- }),
- }
-
- return nil
-}
-
-func (p *Streamer) preparePrm(prm *PutInitPrm) error {
- var err error
-
- // get latest network map
- nm, err := netmap.GetLatestNetworkMap(p.netMapSrc)
- if err != nil {
- return fmt.Errorf("(%T) could not get latest network map: %w", p, err)
- }
-
- idCnr, ok := prm.hdr.ContainerID()
- if !ok {
- return errors.New("missing container ID")
- }
-
- // get container to store the object
- cnrInfo, err := p.cnrSrc.Get(idCnr)
- if err != nil {
- return fmt.Errorf("(%T) could not get container by ID: %w", p, err)
- }
-
- prm.cnr = cnrInfo.Value
-
- // add common options
- prm.traverseOpts = append(prm.traverseOpts,
- // set processing container
- placement.ForContainer(prm.cnr),
- )
-
- if ech := prm.hdr.ECHeader(); ech != nil {
- prm.traverseOpts = append(prm.traverseOpts,
- // set identifier of the processing object
- placement.ForObject(ech.Parent()),
- )
- } else if id, ok := prm.hdr.ID(); ok {
- prm.traverseOpts = append(prm.traverseOpts,
- // set identifier of the processing object
- placement.ForObject(id),
- )
- }
-
- // create placement builder from network map
- builder := placement.NewNetworkMapBuilder(nm)
-
- if prm.common.LocalOnly() {
- // restrict success count to 1 stored copy (to local storage)
- prm.traverseOpts = append(prm.traverseOpts, placement.SuccessAfter(1))
-
- // use local-only placement builder
- builder = util.NewLocalPlacement(builder, p.netmapKeys)
- }
-
- // set placement builder
- prm.traverseOpts = append(prm.traverseOpts, placement.UseBuilder(builder))
-
- return nil
-}
-
-func (p *Streamer) newObjectWriter(prm *PutInitPrm) transformer.ObjectWriter {
- if container.IsECContainer(prm.cnr) && object.IsECSupported(prm.hdr) {
- return p.newECWriter(prm)
- }
- return p.newDefaultObjectWriter(prm, false)
-}
-
-func (p *Streamer) newDefaultObjectWriter(prm *PutInitPrm, forECPlacement bool) transformer.ObjectWriter {
- var relay func(context.Context, nodeDesc) error
- if p.relay != nil {
- relay = func(ctx context.Context, node nodeDesc) error {
- var info client.NodeInfo
-
- client.NodeInfoFromNetmapElement(&info, node.info)
-
- c, err := p.clientConstructor.Get(info)
- if err != nil {
- return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
- }
-
- return p.relay(ctx, info, c)
- }
- }
-
- var resetSuccessAfterOnBroadcast bool
- traverseOpts := prm.traverseOpts
- if forECPlacement && !prm.common.LocalOnly() {
- // save non-regular and linking object to EC container.
- // EC 2.1 -> REP 2, EC 2.2 -> REP 3 etc.
- traverseOpts = append(traverseOpts, placement.SuccessAfter(uint32(policy.ECParityCount(prm.cnr.PlacementPolicy())+1)))
- resetSuccessAfterOnBroadcast = true
- }
-
- return &distributedTarget{
- cfg: p.cfg,
- placementOpts: traverseOpts,
- resetSuccessAfterOnBroadcast: resetSuccessAfterOnBroadcast,
- nodeTargetInitializer: func(node nodeDesc) preparedObjectTarget {
- if node.local {
- return localTarget{
- storage: p.localStore,
- }
- }
-
- rt := &remoteTarget{
- privateKey: p.privateKey,
- commonPrm: prm.common,
- clientConstructor: p.clientConstructor,
- }
-
- client.NodeInfoFromNetmapElement(&rt.nodeInfo, node.info)
-
- return rt
- },
- relay: relay,
- }
-}
-
-func (p *Streamer) newECWriter(prm *PutInitPrm) transformer.ObjectWriter {
- return &objectWriterDispatcher{
- ecWriter: &ecWriter{
- cfg: p.cfg,
- placementOpts: append(prm.traverseOpts, placement.WithCopyNumbers(nil)), // copies number ignored for EC
- container: prm.cnr,
- key: p.privateKey,
- commonPrm: prm.common,
- relay: p.relay,
- },
- repWriter: p.newDefaultObjectWriter(prm, true),
- }
-}
-
func (p *Streamer) SendChunk(ctx context.Context, prm *PutChunkPrm) error {
if p.target == nil {
return errNotInit
@@ -327,10 +83,3 @@ func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) {
id: ids.SelfID,
}, nil
}
-
-func (c *cfg) getWorkerPool(pub []byte) (pkgutil.WorkerPool, bool) {
- if c.netmapKeys.IsLocalKey(pub) {
- return c.localPool, true
- }
- return c.remotePool, false
-}
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index 9c6de4ca8..5bf15b4cd 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -11,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
@@ -55,7 +56,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
s.saveChunks = v.GetSignature() != nil
if s.saveChunks {
- maxSz := s.stream.MaxObjectSize()
+ maxSz := s.stream.MaxSizeSrc.MaxObjectSize()
s.sizes = &sizes{
payloadSz: uint64(v.GetHeader().GetPayloadLength()),
@@ -63,7 +64,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
// check payload size limit overflow
if s.payloadSz > maxSz {
- return putsvc.ErrExceedingMaxSize
+ return target.ErrExceedingMaxSize
}
s.init = req
@@ -74,7 +75,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
// check payload size overflow
if s.writtenPayload > s.payloadSz {
- return putsvc.ErrWrongPayloadSize
+ return target.ErrWrongPayloadSize
}
}
@@ -117,7 +118,7 @@ func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error
if s.saveChunks {
// check payload size correctness
if s.writtenPayload != s.payloadSz {
- return nil, putsvc.ErrWrongPayloadSize
+ return nil, target.ErrWrongPayloadSize
}
}
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 3d04b7084..7e5c6e093 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -5,7 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -52,7 +52,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
}
}
- prm := new(putsvc.RemotePutPrm).
+ prm := new(objectwriter.RemotePutPrm).
WithObject(task.Obj)
for i := 0; task.NumCopies > 0 && i < len(task.Nodes); i++ {
diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go
index a67f2e766..f2f86daf0 100644
--- a/pkg/services/replicator/replicator.go
+++ b/pkg/services/replicator/replicator.go
@@ -4,8 +4,8 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
@@ -24,7 +24,7 @@ type cfg struct {
log *logger.Logger
- remoteSender *putsvc.RemoteSender
+ remoteSender *objectwriter.RemoteSender
remoteGetter *getsvc.RemoteGetter
@@ -67,7 +67,7 @@ func WithLogger(v *logger.Logger) Option {
}
// WithRemoteSender returns option to set remote object sender of Replicator.
-func WithRemoteSender(v *putsvc.RemoteSender) Option {
+func WithRemoteSender(v *objectwriter.RemoteSender) Option {
return func(c *cfg) {
c.remoteSender = v
}
From 108e4e07be5d75f852fee90f8c0d9e17952be3b5 Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Tue, 3 Sep 2024 12:18:10 +0300
Subject: [PATCH 079/705] [#1349] node: Evacuate objects without setting mode
to `MAINTENANCE`
Signed-off-by: Anton Nikiforov
---
.../modules/control/shards_list.go | 18 ++++----
docs/evacuation.md | 6 +++
internal/metrics/engine.go | 8 ++++
pkg/local_object_storage/engine/evacuate.go | 10 +++++
.../engine/evacuate_test.go | 28 ++++++++++++
pkg/local_object_storage/engine/metrics.go | 1 +
pkg/local_object_storage/engine/shards.go | 4 ++
pkg/local_object_storage/shard/exists.go | 4 ++
pkg/local_object_storage/shard/get.go | 14 +++++-
pkg/local_object_storage/shard/head.go | 7 +++
pkg/local_object_storage/shard/info.go | 3 ++
.../shard/metrics_test.go | 3 ++
pkg/local_object_storage/shard/range.go | 4 ++
pkg/local_object_storage/shard/shard.go | 11 +++++
pkg/services/control/server/evacuate_async.go | 3 ++
pkg/services/control/server/list_shards.go | 1 +
pkg/services/control/types.proto | 3 ++
pkg/services/control/types_frostfs.pb.go | 45 ++++++++++++++++---
18 files changed, 156 insertions(+), 17 deletions(-)
diff --git a/cmd/frostfs-cli/modules/control/shards_list.go b/cmd/frostfs-cli/modules/control/shards_list.go
index e9e49bb29..a81034a9e 100644
--- a/cmd/frostfs-cli/modules/control/shards_list.go
+++ b/cmd/frostfs-cli/modules/control/shards_list.go
@@ -65,13 +65,14 @@ func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) {
out := make([]map[string]any, 0, len(ii))
for _, i := range ii {
out = append(out, map[string]any{
- "shard_id": base58.Encode(i.GetShard_ID()),
- "mode": shardModeToString(i.GetMode()),
- "metabase": i.GetMetabasePath(),
- "blobstor": i.GetBlobstor(),
- "writecache": i.GetWritecachePath(),
- "pilorama": i.GetPiloramaPath(),
- "error_count": i.GetErrorCount(),
+ "shard_id": base58.Encode(i.GetShard_ID()),
+ "mode": shardModeToString(i.GetMode()),
+ "metabase": i.GetMetabasePath(),
+ "blobstor": i.GetBlobstor(),
+ "writecache": i.GetWritecachePath(),
+ "pilorama": i.GetPiloramaPath(),
+ "error_count": i.GetErrorCount(),
+ "evacuation_in_progress": i.GetEvacuationInProgress(),
})
}
@@ -105,7 +106,8 @@ func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) {
sb.String()+
pathPrinter("Write-cache", i.GetWritecachePath())+
pathPrinter("Pilorama", i.GetPiloramaPath())+
- fmt.Sprintf("Error count: %d\n", i.GetErrorCount()),
+ fmt.Sprintf("Error count: %d\n", i.GetErrorCount())+
+ fmt.Sprintf("Evacuation in progress: %t\n", i.GetEvacuationInProgress()),
base58.Encode(i.GetShard_ID()),
shardModeToString(i.GetMode()),
)
diff --git a/docs/evacuation.md b/docs/evacuation.md
index 9db514a9e..885ce169a 100644
--- a/docs/evacuation.md
+++ b/docs/evacuation.md
@@ -10,6 +10,12 @@ First of all, by the evacuation the data is transferred to other shards of the s
Only one running evacuation process is allowed on the node at a time.
+It is not necessary to turn on maintenance mode on the storage node.
+
+Once evacuation from a shard has started, it is impossible to read data from it via the public API, unless the evacuation is stopped manually or the node is restarted.
+
+This is necessary to prevent the policer from removing objects with policy `REP 1 ...` from the remote node during evacuation.
+
`frostfs-cli` utility is used to manage evacuation.
## Commands
diff --git a/internal/metrics/engine.go b/internal/metrics/engine.go
index e37777e40..1d01c95ed 100644
--- a/internal/metrics/engine.go
+++ b/internal/metrics/engine.go
@@ -27,6 +27,7 @@ type EngineMetrics interface {
IncRefillObjectsCount(shardID, path string, size int, success bool)
SetRefillPercent(shardID, path string, percent uint32)
SetRefillStatus(shardID, path, status string)
+ SetEvacuationInProgress(shardID string, value bool)
WriteCache() WriteCacheMetrics
GC() GCMetrics
@@ -45,6 +46,7 @@ type engineMetrics struct {
refillObjCounter *prometheus.GaugeVec
refillPayloadCounter *prometheus.GaugeVec
refillPercentCounter *prometheus.GaugeVec
+ evacuationInProgress *shardIDModeValue
gc *gcMetrics
writeCache *writeCacheMetrics
@@ -72,6 +74,7 @@ func newEngineMetrics() *engineMetrics {
refillObjCounter: newEngineGaugeVector("resync_metabase_objects_total", "Count of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}),
refillPayloadCounter: newEngineGaugeVector("resync_metabase_objects_size_bytes", "Size of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}),
refillPercentCounter: newEngineGaugeVector("resync_metabase_complete_percent", "Percent of resynced from blobstore to metabase completeness", []string{shardIDLabel, pathLabel}),
+ evacuationInProgress: newShardIDMode(engineSubsystem, "evacuation_in_progress", "Shard evacuation in progress"),
}
}
@@ -124,6 +127,7 @@ func (m *engineMetrics) DeleteShardMetrics(shardID string) {
m.refillPercentCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
m.mode.Delete(shardID)
m.refillStatus.DeleteByShardID(shardID)
+ m.evacuationInProgress.Delete(shardID)
}
func (m *engineMetrics) AddToObjectCounter(shardID, objectType string, delta int) {
@@ -213,3 +217,7 @@ func (m *engineMetrics) SetRefillPercent(shardID, path string, percent uint32) {
func (m *engineMetrics) SetRefillStatus(shardID, path, status string) {
m.refillStatus.SetMode(shardID, path, status)
}
+
+func (m *engineMetrics) SetEvacuationInProgress(shardID string, value bool) {
+ m.evacuationInProgress.SetMode(shardID, strconv.FormatBool(value))
+}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 04e427e49..7bef6edfb 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -366,6 +366,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, shardID string
listPrm.WithCount(defaultEvacuateBatchSize)
sh := shardsToEvacuate[shardID]
+ sh.SetEvacuationInProgress(true)
var c *meta.Cursor
for {
@@ -655,6 +656,7 @@ func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, to
var getPrm shard.GetPrm
getPrm.SetAddress(addr)
+ getPrm.SkipEvacCheck(true)
getRes, err := sh.Get(ctx, getPrm)
if err != nil {
@@ -765,3 +767,11 @@ func (e *StorageEngine) ResetEvacuationStatus(ctx context.Context) error {
return e.evacuateLimiter.ResetEvacuationStatus()
}
+
+func (e *StorageEngine) ResetEvacuationStatusForShards() {
+ e.mtx.RLock()
+ defer e.mtx.RUnlock()
+ for _, sh := range e.shards {
+ sh.SetEvacuationInProgress(false)
+ }
+}
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index 8d25dad4a..28529fab9 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -125,6 +125,34 @@ func TestEvacuateShardObjects(t *testing.T) {
// Second case ensures that all objects are indeed moved and available.
checkHasObjects(t)
+ // Objects on evacuated shards should be logically unavailable, but persisted on disk.
+ // This is necessary to prevent removing it by policer in case of `REP 1` policy.
+ for _, obj := range objects[len(objects)-objPerShard:] {
+ var prmGet shard.GetPrm
+ prmGet.SetAddress(objectCore.AddressOf(obj))
+ _, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
+ require.Error(t, err)
+
+ prmGet.SkipEvacCheck(true)
+ _, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
+ require.NoError(t, err)
+
+ var prmHead shard.HeadPrm
+ prmHead.SetAddress(objectCore.AddressOf(obj))
+ _, err = e.shards[evacuateShardID].Head(context.Background(), prmHead)
+ require.Error(t, err)
+
+ var existsPrm shard.ExistsPrm
+ existsPrm.Address = objectCore.AddressOf(obj)
+ _, err = e.shards[evacuateShardID].Exists(context.Background(), existsPrm)
+ require.Error(t, err)
+
+ var rngPrm shard.RngPrm
+ rngPrm.SetAddress(objectCore.AddressOf(obj))
+ _, err = e.shards[evacuateShardID].GetRange(context.Background(), rngPrm)
+ require.Error(t, err)
+ }
+
// Calling it again is OK, but all objects are already moved, so no new PUTs should be done.
res, err = e.Evacuate(context.Background(), prm)
require.NoError(t, err)
diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go
index 72b5ae252..1c088c754 100644
--- a/pkg/local_object_storage/engine/metrics.go
+++ b/pkg/local_object_storage/engine/metrics.go
@@ -30,6 +30,7 @@ type MetricRegister interface {
IncRefillObjectsCount(shardID, path string, size int, success bool)
SetRefillPercent(shardID, path string, percent uint32)
SetRefillStatus(shardID, path, status string)
+ SetEvacuationInProgress(shardID string, value bool)
WriteCache() metrics.WriteCacheMetrics
GC() metrics.GCMetrics
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 980b38a63..40584149e 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -97,6 +97,10 @@ func (m *metricsWithID) SetRefillStatus(path string, status string) {
m.mw.SetRefillStatus(m.id, path, status)
}
+func (m *metricsWithID) SetEvacuationInProgress(value bool) {
+ m.mw.SetEvacuationInProgress(m.id, value)
+}
+
// AddShard adds a new shard to the storage engine.
//
// Returns any error encountered that did not allow adding a shard.
diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go
index b5a9604b4..784bf293a 100644
--- a/pkg/local_object_storage/shard/exists.go
+++ b/pkg/local_object_storage/shard/exists.go
@@ -5,7 +5,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -60,6 +62,8 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
if s.info.Mode.Disabled() {
return ExistsRes{}, ErrShardDisabled
+ } else if s.info.EvacuationInProgress {
+ return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
} else if s.info.Mode.NoMetabase() {
var p common.ExistsPrm
p.Address = prm.Address
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index 2e7c84bcd..d1c393613 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -27,8 +27,9 @@ type storFetcher = func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object,
// GetPrm groups the parameters of Get operation.
type GetPrm struct {
- addr oid.Address
- skipMeta bool
+ addr oid.Address
+ skipMeta bool
+ skipEvacCheck bool
}
// GetRes groups the resulting values of Get operation.
@@ -50,6 +51,11 @@ func (p *GetPrm) SetIgnoreMeta(ignore bool) {
p.skipMeta = ignore
}
+// SkipEvacCheck is a Get option which instructs to skip the check for evacuation in progress.
+func (p *GetPrm) SkipEvacCheck(val bool) {
+ p.skipEvacCheck = val
+}
+
// Object returns the requested object.
func (r GetRes) Object() *objectSDK.Object {
return r.obj
@@ -85,6 +91,10 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
return GetRes{}, ErrShardDisabled
}
+ if s.info.EvacuationInProgress && !prm.skipEvacCheck {
+ return GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+
cb := func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object, error) {
var getPrm common.GetPrm
getPrm.Address = prm.addr
diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go
index 9d5d31260..ff57e3bf9 100644
--- a/pkg/local_object_storage/shard/head.go
+++ b/pkg/local_object_storage/shard/head.go
@@ -4,7 +4,9 @@ import (
"context"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
@@ -70,6 +72,11 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
res, err = s.Get(ctx, getPrm)
obj = res.Object()
} else {
+ s.m.RLock()
+ defer s.m.RUnlock()
+ if s.info.EvacuationInProgress {
+ return HeadRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
var headParams meta.GetPrm
headParams.SetAddress(prm.addr)
headParams.SetRaw(prm.raw)
diff --git a/pkg/local_object_storage/shard/info.go b/pkg/local_object_storage/shard/info.go
index 1051ab3db..f01796ec7 100644
--- a/pkg/local_object_storage/shard/info.go
+++ b/pkg/local_object_storage/shard/info.go
@@ -16,6 +16,9 @@ type Info struct {
// Shard mode.
Mode mode.Mode
+ // True when evacuation is in progress.
+ EvacuationInProgress bool
+
// Information about the metabase.
MetaBaseInfo meta.Info
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 1ef849c02..01a85da97 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -192,6 +192,9 @@ func (m *metricsStore) SetRefillStatus(_ string, status string) {
m.refillStatus = status
}
+func (m *metricsStore) SetEvacuationInProgress(bool) {
+}
+
func TestCounters(t *testing.T) {
t.Parallel()
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
index 9491543c4..701268820 100644
--- a/pkg/local_object_storage/shard/range.go
+++ b/pkg/local_object_storage/shard/range.go
@@ -87,6 +87,10 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
s.m.RLock()
defer s.m.RUnlock()
+ if s.info.EvacuationInProgress {
+ return RngRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+
if s.info.Mode.Disabled() {
return RngRes{}, ErrShardDisabled
}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index d11bcc36b..ac389b506 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -97,6 +97,8 @@ type MetricsWriter interface {
SetRefillPercent(path string, percent uint32)
// SetRefillStatus sets refill status.
SetRefillStatus(path string, status string)
+ // SetEvacuationInProgress sets evacuation status.
+ SetEvacuationInProgress(value bool)
}
type cfg struct {
@@ -579,3 +581,12 @@ func (s *Shard) DeleteShardMetrics() {
s.cfg.metricsWriter.DeleteShardMetrics()
}
}
+
+func (s *Shard) SetEvacuationInProgress(val bool) {
+ s.m.Lock()
+ defer s.m.Unlock()
+ s.info.EvacuationInProgress = val
+ if s.metricsWriter != nil {
+ s.metricsWriter.SetEvacuationInProgress(val)
+ }
+}
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
index b829573ec..aacebe9e3 100644
--- a/pkg/services/control/server/evacuate_async.go
+++ b/pkg/services/control/server/evacuate_async.go
@@ -101,6 +101,9 @@ func (s *Server) StopShardEvacuation(ctx context.Context, req *control.StopShard
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
+
+ s.s.ResetEvacuationStatusForShards()
+
return resp, nil
}
diff --git a/pkg/services/control/server/list_shards.go b/pkg/services/control/server/list_shards.go
index 56bd9fc1f..efe2754ea 100644
--- a/pkg/services/control/server/list_shards.go
+++ b/pkg/services/control/server/list_shards.go
@@ -53,6 +53,7 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
si.SetMode(m)
si.SetErrorCount(sh.ErrorCount)
+ si.SetEvacuationInProgress(sh.EvacuationInProgress)
shardInfos = append(shardInfos, *si)
}
diff --git a/pkg/services/control/types.proto b/pkg/services/control/types.proto
index 55636d88a..d8135ed64 100644
--- a/pkg/services/control/types.proto
+++ b/pkg/services/control/types.proto
@@ -142,6 +142,9 @@ message ShardInfo {
// Path to shard's pilorama storage.
string pilorama_path = 7 [ json_name = "piloramaPath" ];
+
+ // Evacuation status.
+ bool evacuation_in_progress = 8 [ json_name = "evacuationInProgress" ];
}
// Blobstor component description.
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go
index 42c1afa52..f92106589 100644
--- a/pkg/services/control/types_frostfs.pb.go
+++ b/pkg/services/control/types_frostfs.pb.go
@@ -954,13 +954,14 @@ func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type ShardInfo struct {
- Shard_ID []byte `json:"shardID"`
- MetabasePath string `json:"metabasePath"`
- Blobstor []BlobstorInfo `json:"blobstor"`
- WritecachePath string `json:"writecachePath"`
- Mode ShardMode `json:"mode"`
- ErrorCount uint32 `json:"errorCount"`
- PiloramaPath string `json:"piloramaPath"`
+ Shard_ID []byte `json:"shardID"`
+ MetabasePath string `json:"metabasePath"`
+ Blobstor []BlobstorInfo `json:"blobstor"`
+ WritecachePath string `json:"writecachePath"`
+ Mode ShardMode `json:"mode"`
+ ErrorCount uint32 `json:"errorCount"`
+ PiloramaPath string `json:"piloramaPath"`
+ EvacuationInProgress bool `json:"evacuationInProgress"`
}
var (
@@ -986,6 +987,7 @@ func (x *ShardInfo) StableSize() (size int) {
size += proto.EnumSize(5, int32(x.Mode))
size += proto.UInt32Size(6, x.ErrorCount)
size += proto.StringSize(7, x.PiloramaPath)
+ size += proto.BoolSize(8, x.EvacuationInProgress)
return size
}
@@ -1023,6 +1025,9 @@ func (x *ShardInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if len(x.PiloramaPath) != 0 {
mm.AppendString(7, x.PiloramaPath)
}
+ if x.EvacuationInProgress {
+ mm.AppendBool(8, x.EvacuationInProgress)
+ }
}
// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
@@ -1080,6 +1085,12 @@ func (x *ShardInfo) UnmarshalProtobuf(src []byte) (err error) {
return fmt.Errorf("cannot unmarshal field %s", "PiloramaPath")
}
x.PiloramaPath = data
+ case 8: // EvacuationInProgress
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "EvacuationInProgress")
+ }
+ x.EvacuationInProgress = data
}
}
return nil
@@ -1147,6 +1158,15 @@ func (x *ShardInfo) GetPiloramaPath() string {
func (x *ShardInfo) SetPiloramaPath(v string) {
x.PiloramaPath = v
}
+func (x *ShardInfo) GetEvacuationInProgress() bool {
+ if x != nil {
+ return x.EvacuationInProgress
+ }
+ return false
+}
+func (x *ShardInfo) SetEvacuationInProgress(v bool) {
+ x.EvacuationInProgress = v
+}
// MarshalJSON implements the json.Marshaler interface.
func (x *ShardInfo) MarshalJSON() ([]byte, error) {
@@ -1202,6 +1222,11 @@ func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString(prefix)
out.String(x.PiloramaPath)
}
+ {
+ const prefix string = ",\"evacuationInProgress\":"
+ out.RawString(prefix)
+ out.Bool(x.EvacuationInProgress)
+ }
out.RawByte('}')
}
@@ -1296,6 +1321,12 @@ func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
f = in.String()
x.PiloramaPath = f
}
+ case "evacuationInProgress":
+ {
+ var f bool
+ f = in.Bool()
+ x.EvacuationInProgress = f
+ }
}
in.WantComma()
}
From 273980cfb99a29eef0dbe4bd013c7a0041b6db79 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Thu, 5 Sep 2024 16:40:32 +0300
Subject: [PATCH 080/705] [#1310] object: Remove irrelevant comments
Signed-off-by: Airat Arifullin
---
pkg/services/object/common/target/target.go | 2 --
1 file changed, 2 deletions(-)
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
index 00080ace6..980c4c6bd 100644
--- a/pkg/services/object/common/target/target.go
+++ b/pkg/services/object/common/target/target.go
@@ -116,7 +116,6 @@ func preparePrm(prm *objectwriter.Params) error {
// get latest network map
nm, err := netmap.GetLatestNetworkMap(prm.Config.NetmapSource)
if err != nil {
- //return fmt.Errorf("(%T) could not get latest network map: %w", p, err)
return fmt.Errorf("could not get latest network map: %w", err)
}
@@ -128,7 +127,6 @@ func preparePrm(prm *objectwriter.Params) error {
// get container to store the object
cnrInfo, err := prm.Config.ContainerSource.Get(idCnr)
if err != nil {
- //return fmt.Errorf("(%T) could not get container by ID: %w", p, err)
return fmt.Errorf("could not get container by ID: %w", err)
}
From f652518c241f405db22259c753be7f8685f39cc7 Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Fri, 6 Sep 2024 13:09:58 +0300
Subject: [PATCH 081/705] [#1357] go: Fix panic caused by using range over
slice len
If a slice is altered in a `for` loop, we cannot use range over its
length: it may cause a panic if the slice gets shorter.
Signed-off-by: Ekaterina Lebedeva
---
pkg/services/control/server/evacuate.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
index 300cb9dc9..ae3413373 100644
--- a/pkg/services/control/server/evacuate.go
+++ b/pkg/services/control/server/evacuate.go
@@ -169,7 +169,7 @@ func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) {
nodes := placement.FlattenNodes(ns)
bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
- for i := range len(nodes) {
+ for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
if bytes.Equal(nodes[i].PublicKey(), bs) {
copy(nodes[i:], nodes[i+1:])
nodes = nodes[:len(nodes)-1]
From 007827255ee65e9d7fd61c0f6bdd6bb59479bac1 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 27 Aug 2024 15:51:55 +0300
Subject: [PATCH 082/705] [#1337] blobovniczatree: Add .rebuild temp files
This makes it possible to avoid opening and closing DBs just to check for incomplete rebuilds.
Signed-off-by: Dmitrii Stepanov
---
internal/logs/logs.go | 1 +
.../blobstor/blobovniczatree/blobovnicza.go | 2 +-
.../blobstor/blobovniczatree/control.go | 6 +--
.../blobstor/blobovniczatree/iterate.go | 21 ++++++++--
.../blobstor/blobovniczatree/rebuild.go | 38 ++++++++++++++++++-
.../blobovniczatree/rebuild_failover_test.go | 12 ++++++
6 files changed, 71 insertions(+), 9 deletions(-)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 78bcd0c0e..97b189529 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -543,4 +543,5 @@ const (
WritecacheSealCompletedAsync = "writecache seal completed successfully"
FailedToSealWritecacheAsync = "failed to seal writecache async"
WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: database is not empty"
+ BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
index 952203367..c909113c7 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
@@ -135,7 +135,7 @@ func getBlobovniczaMaxIndex(directory string) (bool, uint64, error) {
var hasDBs bool
var maxIdx uint64
for _, e := range entries {
- if e.IsDir() {
+ if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
continue
}
hasDBs = true
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
index a31e9d6cb..681cf876c 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
@@ -2,6 +2,7 @@ package blobovniczatree
import (
"context"
+ "strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -41,10 +42,9 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
eg, egCtx := errgroup.WithContext(ctx)
eg.SetLimit(b.blzInitWorkerCount)
- visited := make(map[string]struct{})
- err = b.iterateExistingDBPaths(egCtx, func(p string) (bool, error) {
- visited[p] = struct{}{}
+ err = b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) {
eg.Go(func() error {
+ p = strings.TrimSuffix(p, rebuildSuffix)
shBlz := b.getBlobovniczaWithoutCaching(p)
blz, err := shBlz.Open()
if err != nil {
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
index af3d9e720..f6acb46aa 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
@@ -188,11 +188,11 @@ func (b *Blobovniczas) iterateExistingDBPaths(ctx context.Context, f func(string
b.dbFilesGuard.RLock()
defer b.dbFilesGuard.RUnlock()
- _, err := b.iterateExistingDBPathsDFS(ctx, "", f)
+ _, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return !strings.HasSuffix(path, rebuildSuffix) })
return err
}
-func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path string, f func(string) (bool, error)) (bool, error) {
+func (b *Blobovniczas) iterateExistingPathsDFS(ctx context.Context, path string, f func(string) (bool, error), fileFilter func(path string) bool) (bool, error) {
sysPath := filepath.Join(b.rootPath, path)
entries, err := os.ReadDir(sysPath)
if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode
@@ -208,7 +208,7 @@ func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path strin
default:
}
if entry.IsDir() {
- stop, err := b.iterateExistingDBPathsDFS(ctx, filepath.Join(path, entry.Name()), f)
+ stop, err := b.iterateExistingPathsDFS(ctx, filepath.Join(path, entry.Name()), f, fileFilter)
if err != nil {
return false, err
}
@@ -216,6 +216,9 @@ func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path strin
return true, nil
}
} else {
+ if !fileFilter(entry.Name()) {
+ continue
+ }
stop, err := f(filepath.Join(path, entry.Name()))
if err != nil {
return false, err
@@ -228,6 +231,15 @@ func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path strin
return false, nil
}
+// iterateIncompletedRebuildDBPaths iterates, in no particular order, over the paths of Blobovniczas that have incomplete rebuild files.
+func (b *Blobovniczas) iterateIncompletedRebuildDBPaths(ctx context.Context, f func(string) (bool, error)) error {
+ b.dbFilesGuard.RLock()
+ defer b.dbFilesGuard.RUnlock()
+
+ _, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return strings.HasSuffix(path, rebuildSuffix) })
+ return err
+}
+
func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Address, f func(string) (bool, error)) error {
b.dbFilesGuard.RLock()
defer b.dbFilesGuard.RUnlock()
@@ -249,6 +261,9 @@ func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path st
var dirIdxs []uint64
for _, entry := range entries {
+ if strings.HasSuffix(entry.Name(), rebuildSuffix) {
+ continue
+ }
idx := u64FromHexString(entry.Name())
if entry.IsDir() {
dirIdxs = append(dirIdxs, idx)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index cfc17cfae..058fe1fb6 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -6,6 +6,7 @@ import (
"errors"
"os"
"path/filepath"
+ "strings"
"sync"
"sync/atomic"
"time"
@@ -19,6 +20,8 @@ import (
"golang.org/x/sync/errgroup"
)
+const rebuildSuffix = ".rebuild"
+
var (
errRebuildInProgress = errors.New("rebuild is in progress, the operation cannot be performed")
errBatchFull = errors.New("batch full")
@@ -124,15 +127,36 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
}
shDB.Close()
}()
-
+ dropTempFile, err := b.addRebuildTempFile(path)
+ if err != nil {
+ return 0, err
+ }
migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, limiter)
if err != nil {
return migratedObjects, err
}
shDBClosed, err = b.dropDB(ctx, path, shDB)
+ if err == nil {
+ // drop only on success to continue rebuild on error
+ dropTempFile()
+ }
return migratedObjects, err
}
+func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
+ sysPath := filepath.Join(b.rootPath, path)
+ sysPath = sysPath + rebuildSuffix
+ _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm)
+ if err != nil {
+ return nil, err
+ }
+ return func() {
+ if err := os.Remove(sysPath); err != nil {
+ b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+ }
+ }, nil
+}
+
func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
var result atomic.Uint64
batch := make(map[oid.Address][]byte)
@@ -256,7 +280,10 @@ func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error {
func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage) (uint64, error) {
var count uint64
- return count, b.iterateExistingDBPaths(ctx, func(s string) (bool, error) {
+ var rebuildTempFilesToRemove []string
+ err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) {
+ rebuildTmpFilePath := s
+ s = strings.TrimSuffix(s, rebuildSuffix)
shDB := b.getBlobovnicza(s)
blz, err := shDB.Open()
if err != nil {
@@ -276,8 +303,15 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
count++
}
+ rebuildTempFilesToRemove = append(rebuildTempFilesToRemove, rebuildTmpFilePath)
return false, nil
})
+ for _, tmp := range rebuildTempFilesToRemove {
+ if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
+ b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+ }
+ }
+ return count, err
}
func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string,
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
index a6afed60c..9fec795ca 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
@@ -3,6 +3,7 @@ package blobovniczatree
import (
"bytes"
"context"
+ "os"
"path/filepath"
"sync"
"testing"
@@ -53,6 +54,8 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
}))
require.NoError(t, blz.Close())
+ _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
+ require.NoError(t, err)
testRebuildFailoverValidate(t, dir, obj, true)
}
@@ -82,6 +85,9 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
require.NoError(t, blz.Close())
+ _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
+ require.NoError(t, err)
+
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
@@ -113,6 +119,9 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
require.NoError(t, blz.Close())
+ _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
+ require.NoError(t, err)
+
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
@@ -194,4 +203,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
}
require.NoError(t, blz.Close())
+
+ _, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild"))
+ require.True(t, os.IsNotExist(err))
}
From d508da8397026fac3fd2a5c77846f1a5ec61a665 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 29 Aug 2024 11:34:18 +0300
Subject: [PATCH 083/705] [#1337] blobovniczatree: Add rebuild by fill percent
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/blobovnicza/sizes.go | 4 +
.../blobstor/blobovniczatree/rebuild.go | 93 ++++++++-
.../blobstor/blobovniczatree/rebuild_test.go | 195 +++++++++++++++++-
.../blobstor/common/rebuild.go | 8 +
pkg/local_object_storage/blobstor/rebuild.go | 3 +-
pkg/local_object_storage/shard/rebuild.go | 173 ++++++++++++++++
.../shard/rebuild_limiter.go | 13 --
pkg/local_object_storage/shard/rebuilder.go | 98 ---------
pkg/local_object_storage/shard/shard.go | 2 +-
9 files changed, 470 insertions(+), 119 deletions(-)
create mode 100644 pkg/local_object_storage/shard/rebuild.go
delete mode 100644 pkg/local_object_storage/shard/rebuild_limiter.go
delete mode 100644 pkg/local_object_storage/shard/rebuilder.go
diff --git a/pkg/local_object_storage/blobovnicza/sizes.go b/pkg/local_object_storage/blobovnicza/sizes.go
index 1dff75aed..9bbed0db5 100644
--- a/pkg/local_object_storage/blobovnicza/sizes.go
+++ b/pkg/local_object_storage/blobovnicza/sizes.go
@@ -57,3 +57,7 @@ func (b *Blobovnicza) itemDeleted(itemSize uint64) {
func (b *Blobovnicza) IsFull() bool {
return b.dataSize.Load() >= b.fullSizeLimit
}
+
+func (b *Blobovnicza) FillPercent() int {
+ return int(100.0 * (float64(b.dataSize.Load()) / float64(b.fullSizeLimit)))
+}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index 058fe1fb6..b7f20822e 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
+ "fmt"
"os"
"path/filepath"
"strings"
@@ -59,7 +60,7 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild)
- dbsToMigrate, err := b.getDBsToRebuild(ctx)
+ dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.Action)
if err != nil {
b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
success = false
@@ -93,7 +94,33 @@ func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.
return res, nil
}
-func (b *Blobovniczas) getDBsToRebuild(ctx context.Context) ([]string, error) {
+func (b *Blobovniczas) getDBsToRebuild(ctx context.Context, action common.RebuildAction) ([]string, error) {
+ schemaChange := make(map[string]struct{})
+ fillPercent := make(map[string]struct{})
+ var err error
+ if action.SchemaChange {
+ schemaChange, err = b.selectDBsDoNotMatchSchema(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if action.FillPercent {
+ fillPercent, err = b.selectDBsDoNotMatchFillPercent(ctx, action.FillPercentValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for k := range fillPercent {
+ schemaChange[k] = struct{}{}
+ }
+ result := make([]string, 0, len(schemaChange))
+ for db := range schemaChange {
+ result = append(result, db)
+ }
+ return result, nil
+}
+
+func (b *Blobovniczas) selectDBsDoNotMatchSchema(ctx context.Context) (map[string]struct{}, error) {
dbsToMigrate := make(map[string]struct{})
if err := b.iterateExistingDBPaths(ctx, func(s string) (bool, error) {
dbsToMigrate[s] = struct{}{}
@@ -107,13 +134,69 @@ func (b *Blobovniczas) getDBsToRebuild(ctx context.Context) ([]string, error) {
}); err != nil {
return nil, err
}
- result := make([]string, 0, len(dbsToMigrate))
- for db := range dbsToMigrate {
- result = append(result, db)
+ return dbsToMigrate, nil
+}
+
+func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, target int) (map[string]struct{}, error) {
+ if target <= 0 || target > 100 {
+ return nil, fmt.Errorf("invalid fill percent value %d: must be (0; 100]", target)
+ }
+ result := make(map[string]struct{})
+ if err := b.iterateDeepest(ctx, oid.Address{}, func(lvlPath string) (bool, error) {
+ dir := filepath.Join(b.rootPath, lvlPath)
+ entries, err := os.ReadDir(dir)
+ if os.IsNotExist(err) { // non initialized tree
+ return false, nil
+ }
+ if err != nil {
+ return false, err
+ }
+ hasDBs := false
+ // the db with maxIdx could be the active one, so it should not be rebuilt
+ var maxIdx uint64
+ for _, e := range entries {
+ if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
+ continue
+ }
+ hasDBs = true
+ maxIdx = max(u64FromHexString(e.Name()), maxIdx)
+ }
+ if !hasDBs {
+ return false, nil
+ }
+ for _, e := range entries {
+ if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
+ continue
+ }
+ if u64FromHexString(e.Name()) == maxIdx {
+ continue
+ }
+ path := filepath.Join(lvlPath, e.Name())
+ resettlementRequired, err := b.fillPercentIsLow(path, target)
+ if err != nil {
+ return false, err
+ }
+ if resettlementRequired {
+ result[path] = struct{}{}
+ }
+ }
+ return false, nil
+ }); err != nil {
+ return nil, err
}
return result, nil
}
+func (b *Blobovniczas) fillPercentIsLow(path string, target int) (bool, error) {
+ shDB := b.getBlobovnicza(path)
+ blz, err := shDB.Open()
+ if err != nil {
+ return false, err
+ }
+ defer shDB.Close()
+ return blz.FillPercent() < target, nil
+}
+
func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
shDB := b.getBlobovnicza(path)
blz, err := shDB.Open()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index 4a51fd86a..62ae9ea90 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -15,7 +15,7 @@ import (
"golang.org/x/sync/errgroup"
)
-func TestBlobovniczaTreeRebuild(t *testing.T) {
+func TestBlobovniczaTreeSchemaRebuild(t *testing.T) {
t.Parallel()
t.Run("width increased", func(t *testing.T) {
@@ -39,6 +39,197 @@ func TestBlobovniczaTreeRebuild(t *testing.T) {
})
}
+func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
+ t.Parallel()
+
+ t.Run("no rebuild by fill percent", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ for i := 0; i < 100; i++ {
+ obj := blobstortest.NewObject(64 * 1024) // 64KB object
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ }
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ Action: common.RebuildAction{
+ SchemaChange: false,
+ FillPercent: true,
+ FillPercentValue: 60,
+ },
+ })
+ require.NoError(t, err)
+ dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
+ require.False(t, dataMigrated)
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close())
+ })
+
+ t.Run("no rebuild single db", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB soft limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ obj := blobstortest.NewObject(64 * 1024) // 64KB object
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ Action: common.RebuildAction{
+ SchemaChange: false,
+ FillPercent: true,
+ FillPercentValue: 90, // 64KB / 100KB = 64%
+ },
+ })
+ require.NoError(t, err)
+ dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
+ require.False(t, dataMigrated)
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close())
+ })
+
+ t.Run("rebuild by fill percent", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ toDelete := make(map[oid.Address][]byte)
+ for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+ obj := blobstortest.NewObject(64 * 1024)
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ if i%2 == 1 {
+ toDelete[prm.Address] = res.StorageID
+ }
+ }
+ for addr, storageID := range toDelete {
+ var prm common.DeletePrm
+ prm.Address = addr
+ prm.StorageID = storageID
+ _, err := b.Delete(context.Background(), prm)
+ require.NoError(t, err)
+ }
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ Action: common.RebuildAction{
+ SchemaChange: false,
+ FillPercent: true,
+ FillPercentValue: 80,
+ },
+ })
+ require.NoError(t, err)
+ require.Equal(t, uint64(49), rRes.FilesRemoved)
+ require.Equal(t, uint64(49), rRes.ObjectsMoved) // 49 DBs with 1 objects
+ require.Equal(t, uint64(49), metaStub.updatedCount)
+
+ for addr, storageID := range storageIDs {
+ if _, found := toDelete[addr]; found {
+ continue
+ }
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close())
+ })
+}
+
func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
t.Parallel()
@@ -92,6 +283,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
rPrm.WorkerLimiter = &rebuildLimiterStub{}
+ rPrm.Action = common.RebuildAction{SchemaChange: true}
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -180,6 +372,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
rPrm.WorkerLimiter = &rebuildLimiterStub{}
+ rPrm.Action = common.RebuildAction{SchemaChange: true}
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go
index 9f629ef8c..020d9d022 100644
--- a/pkg/local_object_storage/blobstor/common/rebuild.go
+++ b/pkg/local_object_storage/blobstor/common/rebuild.go
@@ -11,9 +11,17 @@ type RebuildRes struct {
FilesRemoved uint64
}
+type RebuildAction struct {
+ SchemaChange bool
+
+ FillPercent bool
+ FillPercentValue int
+}
+
type RebuildPrm struct {
MetaStorage MetaStorage
WorkerLimiter ConcurrentWorkersLimiter
+ Action RebuildAction
}
type MetaStorage interface {
diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go
index 101c60752..31bc2d167 100644
--- a/pkg/local_object_storage/blobstor/rebuild.go
+++ b/pkg/local_object_storage/blobstor/rebuild.go
@@ -18,13 +18,14 @@ type ConcurrentWorkersLimiter interface {
ReleaseWorkSlot()
}
-func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter) error {
+func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, action common.RebuildAction) error {
var summary common.RebuildRes
var rErr error
for _, storage := range b.storage {
res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{
MetaStorage: upd,
WorkerLimiter: limiter,
+ Action: action,
})
summary.FilesRemoved += res.FilesRemoved
summary.ObjectsMoved += res.ObjectsMoved
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
new file mode 100644
index 000000000..998fcf08b
--- /dev/null
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -0,0 +1,173 @@
+package shard
+
+import (
+ "context"
+ "errors"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+)
+
+type RebuildWorkerLimiter interface {
+ AcquireWorkSlot(ctx context.Context) error
+ ReleaseWorkSlot()
+}
+
+type rebuildLimiter struct {
+ semaphore chan struct{}
+}
+
+func newRebuildLimiter(workersCount uint32) *rebuildLimiter {
+ return &rebuildLimiter{
+ semaphore: make(chan struct{}, workersCount),
+ }
+}
+
+func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error {
+ select {
+ case l.semaphore <- struct{}{}:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (l *rebuildLimiter) ReleaseWorkSlot() {
+ <-l.semaphore
+}
+
+type rebuildTask struct {
+ limiter RebuildWorkerLimiter
+ action common.RebuildAction
+}
+
+type rebuilder struct {
+ mtx *sync.Mutex
+ wg *sync.WaitGroup
+ cancel func()
+ limiter RebuildWorkerLimiter
+ done chan struct{}
+ tasks chan rebuildTask
+}
+
+func newRebuilder(l RebuildWorkerLimiter) *rebuilder {
+ return &rebuilder{
+ mtx: &sync.Mutex{},
+ wg: &sync.WaitGroup{},
+ limiter: l,
+ tasks: make(chan rebuildTask, 10),
+ }
+}
+
+func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+
+	if r.done != nil {
+		return // already started
+	}
+	ctx, cancel := context.WithCancel(ctx)
+	r.cancel = cancel
+	r.done = make(chan struct{})
+	r.wg.Add(1)
+	go func() {
+		defer r.wg.Done()
+		for {
+			select {
+			case <-r.done:
+				return
+			case t, ok := <-r.tasks:
+				if !ok {
+					return // closed channel yields immediately; `continue` here would busy-spin
+				}
+				runRebuild(ctx, bs, mb, log, t.action, t.limiter)
+			}
+		}
+	}()
+	select {
+	case <-ctx.Done():
+		return
+	case r.tasks <- rebuildTask{
+		limiter: r.limiter,
+		action: common.RebuildAction{
+			SchemaChange: true,
+		},
+	}:
+		return
+	}
+}
+
+func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger,
+ action common.RebuildAction, limiter RebuildWorkerLimiter,
+) {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ log.Info(logs.BlobstoreRebuildStarted)
+ if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, action); err != nil {
+ log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
+ } else {
+ log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
+ }
+}
+
+func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, action common.RebuildAction,
+) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case r.tasks <- rebuildTask{
+ limiter: limiter,
+ action: action,
+ }:
+ return nil
+ }
+}
+
+func (r *rebuilder) Stop(log *logger.Logger) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ if r.done != nil {
+ close(r.done)
+ }
+ if r.cancel != nil {
+ r.cancel()
+ }
+ r.wg.Wait()
+ r.cancel = nil
+ r.done = nil
+ log.Info(logs.BlobstoreRebuildStopped)
+}
+
+var errMBIsNotAvailable = errors.New("metabase is not available")
+
+type mbStorageIDUpdate struct {
+ mb *meta.DB
+}
+
+func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if u.mb == nil {
+ return errMBIsNotAvailable
+ }
+
+ var prm meta.UpdateStorageIDPrm
+ prm.SetAddress(addr)
+ prm.SetStorageID(storageID)
+ _, err := u.mb.UpdateStorageID(ctx, prm)
+ return err
+}
diff --git a/pkg/local_object_storage/shard/rebuild_limiter.go b/pkg/local_object_storage/shard/rebuild_limiter.go
deleted file mode 100644
index efc21837c..000000000
--- a/pkg/local_object_storage/shard/rebuild_limiter.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package shard
-
-import "context"
-
-type RebuildWorkerLimiter interface {
- AcquireWorkSlot(ctx context.Context) error
- ReleaseWorkSlot()
-}
-
-type noopRebuildLimiter struct{}
-
-func (l *noopRebuildLimiter) AcquireWorkSlot(context.Context) error { return nil }
-func (l *noopRebuildLimiter) ReleaseWorkSlot() {}
diff --git a/pkg/local_object_storage/shard/rebuilder.go b/pkg/local_object_storage/shard/rebuilder.go
deleted file mode 100644
index f18573c57..000000000
--- a/pkg/local_object_storage/shard/rebuilder.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package shard
-
-import (
- "context"
- "errors"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
-)
-
-type rebuilder struct {
- mtx *sync.Mutex
- wg *sync.WaitGroup
- cancel func()
- limiter RebuildWorkerLimiter
-}
-
-func newRebuilder(l RebuildWorkerLimiter) *rebuilder {
- return &rebuilder{
- mtx: &sync.Mutex{},
- wg: &sync.WaitGroup{},
- cancel: nil,
- limiter: l,
- }
-}
-
-func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) {
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- r.start(ctx, bs, mb, log)
-}
-
-func (r *rebuilder) start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) {
- if r.cancel != nil {
- r.stop(log)
- }
- ctx, cancel := context.WithCancel(ctx)
- r.cancel = cancel
- r.wg.Add(1)
- go func() {
- defer r.wg.Done()
-
- log.Info(logs.BlobstoreRebuildStarted)
- if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, r.limiter); err != nil {
- log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
- } else {
- log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
- }
- }()
-}
-
-func (r *rebuilder) Stop(log *logger.Logger) {
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- r.stop(log)
-}
-
-func (r *rebuilder) stop(log *logger.Logger) {
- if r.cancel == nil {
- return
- }
-
- r.cancel()
- r.wg.Wait()
- r.cancel = nil
- log.Info(logs.BlobstoreRebuildStopped)
-}
-
-var errMBIsNotAvailable = errors.New("metabase is not available")
-
-type mbStorageIDUpdate struct {
- mb *meta.DB
-}
-
-func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- if u.mb == nil {
- return errMBIsNotAvailable
- }
-
- var prm meta.UpdateStorageIDPrm
- prm.SetAddress(addr)
- prm.SetStorageID(storageID)
- _, err := u.mb.UpdateStorageID(ctx, prm)
- return err
-}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index ac389b506..1eaee8815 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -151,7 +151,7 @@ func defaultCfg() *cfg {
log: &logger.Logger{Logger: zap.L()},
gcCfg: defaultGCCfg(),
reportErrorFunc: func(string, string, error) {},
- rebuildLimiter: &noopRebuildLimiter{},
+ rebuildLimiter: newRebuildLimiter(1),
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
}
From 6b6eabe41cd5750257adc3041f6a1c28df8197c7 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 29 Aug 2024 12:11:19 +0300
Subject: [PATCH 084/705] [#1337] cli: Add `control shards rebuild` command
Signed-off-by: Dmitrii Stepanov
---
.../modules/control/rebuild_shards.go | 88 ++
cmd/frostfs-cli/modules/control/shards.go | 1 +
pkg/local_object_storage/engine/rebuild.go | 90 ++
pkg/local_object_storage/shard/rebuild.go | 33 +
pkg/services/control/rpc.go | 14 +
pkg/services/control/server/rebuild.go | 59 ++
pkg/services/control/service.proto | 29 +
pkg/services/control/service_frostfs.pb.go | 918 ++++++++++++++++++
pkg/services/control/service_grpc.pb.go | 39 +
9 files changed, 1271 insertions(+)
create mode 100644 cmd/frostfs-cli/modules/control/rebuild_shards.go
create mode 100644 pkg/local_object_storage/engine/rebuild.go
create mode 100644 pkg/services/control/server/rebuild.go
diff --git a/cmd/frostfs-cli/modules/control/rebuild_shards.go b/cmd/frostfs-cli/modules/control/rebuild_shards.go
new file mode 100644
index 000000000..e2b408712
--- /dev/null
+++ b/cmd/frostfs-cli/modules/control/rebuild_shards.go
@@ -0,0 +1,88 @@
+package control
+
+import (
+ "fmt"
+
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "github.com/mr-tron/base58"
+ "github.com/spf13/cobra"
+)
+
+const (
+ fillPercentFlag = "fill_percent"
+)
+
+var shardsRebuildCmd = &cobra.Command{
+ Use: "rebuild",
+ Short: "Rebuild shards",
+ Long: "Rebuild reclaims storage occupied by dead objects and adjusts the storage structure according to the configuration (for blobovnicza only now)",
+ Run: shardsRebuild,
+}
+
+func shardsRebuild(cmd *cobra.Command, _ []string) {
+ pk := key.Get(cmd)
+
+ req := &control.StartShardRebuildRequest{
+ Body: &control.StartShardRebuildRequest_Body{
+ Shard_ID: getShardIDList(cmd),
+ TargetFillPercent: getFillPercentValue(cmd),
+ ConcurrencyLimit: getConcurrencyValue(cmd),
+ },
+ }
+
+ signRequest(cmd, pk, req)
+
+ cli := getClient(cmd, pk)
+
+ var resp *control.StartShardRebuildResponse
+ var err error
+ err = cli.ExecRaw(func(client *rawclient.Client) error {
+ resp, err = control.StartShardRebuild(client, req)
+ return err
+ })
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+ verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+ var success, failed uint
+ for _, res := range resp.GetBody().GetResults() {
+ if res.GetSuccess() {
+ success++
+ cmd.Printf("Shard %s: OK\n", base58.Encode(res.GetShard_ID()))
+ } else {
+ failed++
+ cmd.Printf("Shard %s: failed with error %q\n", base58.Encode(res.GetShard_ID()), res.GetError())
+ }
+ }
+ cmd.Printf("Total: %d success, %d failed\n", success, failed)
+}
+
+func getFillPercentValue(cmd *cobra.Command) uint32 {
+	v, _ := cmd.Flags().GetUint32(fillPercentFlag)
+	if v == 0 || v > 100 { // uint32 cannot be negative, so `<= 0` was misleading
+		commonCmd.ExitOnErr(cmd, "invalid fill_percent value", fmt.Errorf("fill_percent value must be (0, 100], current value: %d", v))
+	}
+	return v
+}
+
+func getConcurrencyValue(cmd *cobra.Command) uint32 {
+	v, _ := cmd.Flags().GetUint32(concurrencyFlag)
+	if v == 0 || v > 10000 { // uint32 cannot be negative, so `<= 0` was misleading
+		commonCmd.ExitOnErr(cmd, "invalid concurrency value", fmt.Errorf("concurrency value must be (0, 10 000], current value: %d", v))
+	}
+	return v
+}
+
+func initControlShardRebuildCmd() {
+	initControlFlags(shardsRebuildCmd)
+
+	flags := shardsRebuildCmd.Flags()
+	flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
+	flags.Bool(shardAllFlag, false, "Process all shards")
+	flags.Uint32(fillPercentFlag, 80, "Target fill percent to reclaim space")
+	flags.Uint32(concurrencyFlag, 20, "Maximum count of concurrently rebuilding files")
+	// Register the constraint on THIS command; the original called it on
+	// setShardModeCmd (copy-paste bug), leaving --id/--all non-exclusive here.
+	shardsRebuildCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
+}
diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go
index d8198c426..d6c2a0b9b 100644
--- a/cmd/frostfs-cli/modules/control/shards.go
+++ b/cmd/frostfs-cli/modules/control/shards.go
@@ -28,4 +28,5 @@ func initControlShardsCmd() {
initControlDoctorCmd()
initControlShardsWritecacheCmd()
initControlShardsDetachCmd()
+ initControlShardRebuildCmd()
}
diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go
new file mode 100644
index 000000000..3970aae89
--- /dev/null
+++ b/pkg/local_object_storage/engine/rebuild.go
@@ -0,0 +1,90 @@
+package engine
+
+import (
+ "context"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "golang.org/x/sync/errgroup"
+)
+
+type RebuildPrm struct {
+ ShardIDs []*shard.ID
+ ConcurrencyLimit uint32
+ TargetFillPercent uint32
+}
+
+type ShardRebuildResult struct {
+ ShardID *shard.ID
+ Success bool
+ ErrorMsg string
+}
+
+type RebuildRes struct {
+ ShardResults []ShardRebuildResult
+}
+
+func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Rebuild",
+ trace.WithAttributes(
+ attribute.Int("shard_id_count", len(prm.ShardIDs)),
+ attribute.Int64("target_fill_percent", int64(prm.TargetFillPercent)),
+ attribute.Int64("concurrency_limit", int64(prm.ConcurrencyLimit)),
+ ))
+ defer span.End()
+
+ res := RebuildRes{
+ ShardResults: make([]ShardRebuildResult, 0, len(prm.ShardIDs)),
+ }
+ resGuard := &sync.Mutex{}
+
+ limiter := newRebuildLimiter(prm.ConcurrencyLimit)
+
+ eg, egCtx := errgroup.WithContext(ctx)
+ for _, shardID := range prm.ShardIDs {
+ eg.Go(func() error {
+ e.mtx.RLock()
+ sh, ok := e.shards[shardID.String()]
+ e.mtx.RUnlock()
+
+ if !ok {
+ resGuard.Lock()
+ defer resGuard.Unlock()
+ res.ShardResults = append(res.ShardResults, ShardRebuildResult{
+ ShardID: shardID,
+ ErrorMsg: errShardNotFound.Error(),
+ })
+ return nil
+ }
+
+ err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{
+ ConcurrencyLimiter: limiter,
+ TargetFillPercent: prm.TargetFillPercent,
+ })
+
+ resGuard.Lock()
+ defer resGuard.Unlock()
+
+ if err != nil {
+ res.ShardResults = append(res.ShardResults, ShardRebuildResult{
+ ShardID: shardID,
+ ErrorMsg: err.Error(),
+ })
+ } else {
+ res.ShardResults = append(res.ShardResults, ShardRebuildResult{
+ ShardID: shardID,
+ Success: true,
+ })
+ }
+ return nil
+ })
+ }
+
+ if err := eg.Wait(); err != nil {
+ return RebuildRes{}, err
+ }
+ return res, nil
+}
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index 998fcf08b..f8051999e 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -10,7 +10,10 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -171,3 +174,33 @@ func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Addres
_, err := u.mb.UpdateStorageID(ctx, prm)
return err
}
+
+type RebuildPrm struct {
+ ConcurrencyLimiter RebuildWorkerLimiter
+ TargetFillPercent uint32
+}
+
+func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ScheduleRebuild",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.Int64("target_fill_percent", int64(p.TargetFillPercent)),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, common.RebuildAction{
+ SchemaChange: true,
+ FillPercent: true,
+ FillPercentValue: int(p.TargetFillPercent),
+ })
+}
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index a90e58a65..80aece008 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -30,6 +30,7 @@ const (
rpcSealWriteCache = "SealWriteCache"
rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides"
rpcDetachShards = "DetachShards"
+ rpcStartShardRebuild = "StartShardRebuild"
)
// HealthCheck executes ControlService.HealthCheck RPC.
@@ -361,3 +362,16 @@ func DetachShards(
return wResp.message, nil
}
+
+// StartShardRebuild executes ControlService.StartShardRebuild RPC.
+func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts ...client.CallOption) (*StartShardRebuildResponse, error) {
+ wResp := newResponseWrapper[StartShardRebuildResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcStartShardRebuild), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
diff --git a/pkg/services/control/server/rebuild.go b/pkg/services/control/server/rebuild.go
new file mode 100644
index 000000000..6ddfb8bf4
--- /dev/null
+++ b/pkg/services/control/server/rebuild.go
@@ -0,0 +1,59 @@
+package control
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func (s *Server) StartShardRebuild(ctx context.Context, req *control.StartShardRebuildRequest) (*control.StartShardRebuildResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ if req.GetBody().GetConcurrencyLimit() == 0 || req.GetBody().GetConcurrencyLimit() > 10000 {
+ return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("concurrency limit must be in range (0; 10 000], current value %d", req.GetBody().GetConcurrencyLimit()))
+ }
+
+ if req.GetBody().GetTargetFillPercent() == 0 || req.GetBody().GetTargetFillPercent() > 100 {
+ return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("fill percent must be in range (0; 100], current value %d", req.GetBody().GetTargetFillPercent()))
+ }
+
+ prm := engine.RebuildPrm{
+ ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()),
+ ConcurrencyLimit: req.GetBody().GetConcurrencyLimit(),
+ TargetFillPercent: req.GetBody().GetTargetFillPercent(),
+ }
+
+ res, err := s.s.Rebuild(ctx, prm)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.StartShardRebuildResponse{Body: &control.StartShardRebuildResponse_Body{}}
+ for _, r := range res.ShardResults {
+ if r.Success {
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{
+ Shard_ID: *r.ShardID,
+ Success: true,
+ })
+ } else {
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{
+ Shard_ID: *r.ShardID,
+ Error: r.ErrorMsg,
+ })
+ }
+ }
+
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index d6639cb48..04994328a 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -91,6 +91,9 @@ service ControlService {
// DetachShards detaches and closes shards.
rpc DetachShards(DetachShardsRequest) returns (DetachShardsResponse);
+
+ // StartShardRebuild starts shard rebuild process.
+ rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse);
}
// Health check request.
@@ -699,3 +702,29 @@ message DetachShardsResponse {
Signature signature = 2;
}
+
+message StartShardRebuildRequest {
+ message Body {
+ repeated bytes shard_ID = 1;
+ uint32 target_fill_percent = 2;
+ uint32 concurrency_limit = 3;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message StartShardRebuildResponse {
+ message Body {
+ message Status {
+ bytes shard_ID = 1;
+ bool success = 2;
+ string error = 3;
+ }
+ repeated Status results = 1;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index eb0d95c64..019cac290 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -15023,3 +15023,921 @@ func (x *DetachShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
in.Consumed()
}
}
+
+type StartShardRebuildRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ TargetFillPercent uint32 `json:"targetFillPercent"`
+ ConcurrencyLimit uint32 `json:"concurrencyLimit"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest_Body)(nil)
+ _ json.Marshaler = (*StartShardRebuildRequest_Body)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.RepeatedBytesSize(1, x.Shard_ID)
+ size += proto.UInt32Size(2, x.TargetFillPercent)
+ size += proto.UInt32Size(3, x.ConcurrencyLimit)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if x.TargetFillPercent != 0 {
+ mm.AppendUint32(2, x.TargetFillPercent)
+ }
+ if x.ConcurrencyLimit != 0 {
+ mm.AppendUint32(3, x.ConcurrencyLimit)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // TargetFillPercent
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TargetFillPercent")
+ }
+ x.TargetFillPercent = data
+ case 3: // ConcurrencyLimit
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ConcurrencyLimit")
+ }
+ x.ConcurrencyLimit = data
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *StartShardRebuildRequest_Body) GetTargetFillPercent() uint32 {
+ if x != nil {
+ return x.TargetFillPercent
+ }
+ return 0
+}
+func (x *StartShardRebuildRequest_Body) SetTargetFillPercent(v uint32) {
+ x.TargetFillPercent = v
+}
+func (x *StartShardRebuildRequest_Body) GetConcurrencyLimit() uint32 {
+ if x != nil {
+ return x.ConcurrencyLimit
+ }
+ return 0
+}
+func (x *StartShardRebuildRequest_Body) SetConcurrencyLimit(v uint32) {
+ x.ConcurrencyLimit = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Base64Bytes(x.Shard_ID[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"targetFillPercent\":"
+ out.RawString(prefix)
+ out.Uint32(x.TargetFillPercent)
+ }
+ {
+ const prefix string = ",\"concurrencyLimit\":"
+ out.RawString(prefix)
+ out.Uint32(x.ConcurrencyLimit)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Bytes()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "targetFillPercent":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.TargetFillPercent = f
+ }
+ case "concurrencyLimit":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.ConcurrencyLimit = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardRebuildRequest struct {
+ Body *StartShardRebuildRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest)(nil)
+ _ json.Marshaler = (*StartShardRebuildRequest)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *StartShardRebuildRequest) SignedDataSize() int {
+ return x.GetBody().StableSize()
+}
+
+// SignedDataSize returns size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *StartShardRebuildRequest) ReadSignedData(buf []byte) ([]byte, error) {
+ return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StartShardRebuildRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest) GetBody() *StartShardRebuildRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest) SetBody(v *StartShardRebuildRequest_Body) {
+ x.Body = v
+}
+func (x *StartShardRebuildRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StartShardRebuildRequest_Body
+ f = new(StartShardRebuildRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardRebuildResponse_Body_Status struct {
+ Shard_ID []byte `json:"shardID"`
+ Success bool `json:"success"`
+ Error string `json:"error"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+ _ json.Marshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildResponse_Body_Status) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.BytesSize(1, x.Shard_ID)
+ size += proto.BoolSize(2, x.Success)
+ size += proto.StringSize(3, x.Error)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildResponse_Body_Status) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Shard_ID) != 0 {
+ mm.AppendBytes(1, x.Shard_ID)
+ }
+ if x.Success {
+ mm.AppendBool(2, x.Success)
+ }
+ if len(x.Error) != 0 {
+ mm.AppendString(3, x.Error)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body_Status")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = data
+ case 2: // Success
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Success")
+ }
+ x.Success = data
+ case 3: // Error
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Error")
+ }
+ x.Error = data
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body_Status) GetShard_ID() []byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body_Status) SetShard_ID(v []byte) {
+ x.Shard_ID = v
+}
+func (x *StartShardRebuildResponse_Body_Status) GetSuccess() bool {
+ if x != nil {
+ return x.Success
+ }
+ return false
+}
+func (x *StartShardRebuildResponse_Body_Status) SetSuccess(v bool) {
+ x.Success = v
+}
+func (x *StartShardRebuildResponse_Body_Status) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+func (x *StartShardRebuildResponse_Body_Status) SetError(v string) {
+ x.Error = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Shard_ID)
+ }
+ {
+ const prefix string = ",\"success\":"
+ out.RawString(prefix)
+ out.Bool(x.Success)
+ }
+ {
+ const prefix string = ",\"error\":"
+ out.RawString(prefix)
+ out.String(x.Error)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Shard_ID = f
+ }
+ case "success":
+ {
+ var f bool
+ f = in.Bool()
+ x.Success = f
+ }
+ case "error":
+ {
+ var f string
+ f = in.String()
+ x.Error = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardRebuildResponse_Body struct {
+ Results []StartShardRebuildResponse_Body_Status `json:"results"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body)(nil)
+ _ json.Marshaler = (*StartShardRebuildResponse_Body)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ for i := range x.Results {
+ size += proto.NestedStructureSizeUnchecked(1, &x.Results[i])
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for i := range x.Results {
+ x.Results[i].EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Results
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Results")
+ }
+ x.Results = append(x.Results, StartShardRebuildResponse_Body_Status{})
+ ff := &x.Results[len(x.Results)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body) GetResults() []StartShardRebuildResponse_Body_Status {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body) SetResults(v []StartShardRebuildResponse_Body_Status) {
+ x.Results = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"results\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Results {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Results[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "results":
+ {
+ var f StartShardRebuildResponse_Body_Status
+ var list []StartShardRebuildResponse_Body_Status
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = StartShardRebuildResponse_Body_Status{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Results = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardRebuildResponse struct {
+ Body *StartShardRebuildResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse)(nil)
+ _ json.Marshaler = (*StartShardRebuildResponse)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *StartShardRebuildResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *StartShardRebuildResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StartShardRebuildResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse) GetBody() *StartShardRebuildResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse) SetBody(v *StartShardRebuildResponse_Body) {
+ x.Body = v
+}
+func (x *StartShardRebuildResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StartShardRebuildResponse_Body
+ f = new(StartShardRebuildResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go
index fa9de974a..f5cfefa85 100644
--- a/pkg/services/control/service_grpc.pb.go
+++ b/pkg/services/control/service_grpc.pb.go
@@ -41,6 +41,7 @@ const (
ControlService_ListTargetsLocalOverrides_FullMethodName = "/control.ControlService/ListTargetsLocalOverrides"
ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache"
ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards"
+ ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild"
)
// ControlServiceClient is the client API for ControlService service.
@@ -97,6 +98,8 @@ type ControlServiceClient interface {
SealWriteCache(ctx context.Context, in *SealWriteCacheRequest, opts ...grpc.CallOption) (*SealWriteCacheResponse, error)
// DetachShards detaches and closes shards.
DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error)
+ // StartShardRebuild starts shard rebuild process.
+ StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error)
}
type controlServiceClient struct {
@@ -305,6 +308,15 @@ func (c *controlServiceClient) DetachShards(ctx context.Context, in *DetachShard
return out, nil
}
+func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) {
+ out := new(StartShardRebuildResponse)
+ err := c.cc.Invoke(ctx, ControlService_StartShardRebuild_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// ControlServiceServer is the server API for ControlService service.
// All implementations should embed UnimplementedControlServiceServer
// for forward compatibility
@@ -359,6 +371,8 @@ type ControlServiceServer interface {
SealWriteCache(context.Context, *SealWriteCacheRequest) (*SealWriteCacheResponse, error)
// DetachShards detaches and closes shards.
DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error)
+ // StartShardRebuild starts shard rebuild process.
+ StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@@ -431,6 +445,9 @@ func (UnimplementedControlServiceServer) SealWriteCache(context.Context, *SealWr
func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DetachShards not implemented")
}
+func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented")
+}
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -839,6 +856,24 @@ func _ControlService_DetachShards_Handler(srv interface{}, ctx context.Context,
return interceptor(ctx, in, info, handler)
}
+func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StartShardRebuildRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).StartShardRebuild(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_StartShardRebuild_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).StartShardRebuild(ctx, req.(*StartShardRebuildRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -934,6 +969,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "DetachShards",
Handler: _ControlService_DetachShards_Handler,
},
+ {
+ MethodName: "StartShardRebuild",
+ Handler: _ControlService_StartShardRebuild_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/service.proto",
From a61201a98753c4522a1169abe5b42f2631f639ad Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 29 Aug 2024 13:51:09 +0300
Subject: [PATCH 085/705] [#1337] config: Move `rebuild_worker_count` to shard
section
This makes it simpler to limit the performance degradation caused by
rebuild for every shard.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/control/shards.go | 1 +
cmd/frostfs-node/config.go | 7 ++---
cmd/frostfs-node/config/engine/config.go | 11 --------
cmd/frostfs-node/config/engine/config_test.go | 4 +--
.../config/engine/shard/config.go | 15 +++++++++++
config/example/node.env | 2 +-
config/example/node.json | 2 +-
config/example/node.yaml | 2 +-
docs/storage-node-configuration.md | 1 +
pkg/local_object_storage/engine/engine.go | 16 ++----------
pkg/local_object_storage/engine/rebuild.go | 2 +-
.../engine/rebuild_limiter.go | 26 -------------------
pkg/local_object_storage/engine/shards.go | 1 -
pkg/local_object_storage/shard/control.go | 2 +-
pkg/local_object_storage/shard/rebuild.go | 2 +-
pkg/local_object_storage/shard/shard.go | 10 +++----
16 files changed, 36 insertions(+), 68 deletions(-)
delete mode 100644 pkg/local_object_storage/engine/rebuild_limiter.go
diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go
index d6c2a0b9b..329cb9100 100644
--- a/cmd/frostfs-cli/modules/control/shards.go
+++ b/cmd/frostfs-cli/modules/control/shards.go
@@ -19,6 +19,7 @@ func initControlShardsCmd() {
shardsCmd.AddCommand(doctorCmd)
shardsCmd.AddCommand(writecacheShardCmd)
shardsCmd.AddCommand(shardsDetachCmd)
+ shardsCmd.AddCommand(shardsRebuildCmd)
initControlShardsListCmd()
initControlSetShardModeCmd()
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 110281418..b59518d14 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -109,7 +109,6 @@ type applicationConfiguration struct {
shardPoolSize uint32
shards []shardCfg
lowMem bool
- rebuildWorkers uint32
}
// if need to run node in compatibility with other versions mode
@@ -127,6 +126,8 @@ type shardCfg struct {
refillMetabaseWorkersCount int
mode shardmode.Mode
+ rebuildWorkersCount uint32
+
metaCfg struct {
path string
perm fs.FileMode
@@ -230,7 +231,6 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)
- a.EngineCfg.rebuildWorkers = engineconfig.EngineRebuildWorkersCount(c)
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
}
@@ -240,6 +240,7 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig
newConfig.refillMetabase = oldConfig.RefillMetabase()
newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
+ newConfig.rebuildWorkersCount = oldConfig.RebuildWorkerCount()
newConfig.mode = oldConfig.Mode()
newConfig.compress = oldConfig.Compress()
newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
@@ -835,7 +836,6 @@ func (c *cfg) engineOpts() []engine.Option {
engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
engine.WithLogger(c.log),
engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
- engine.WithRebuildWorkersCount(c.EngineCfg.rebuildWorkers),
)
if c.metricsCollector != nil {
@@ -998,6 +998,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
shard.WithLogger(c.log),
shard.WithRefillMetabase(shCfg.refillMetabase),
shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount),
+ shard.WithRebuildWorkersCount(shCfg.rebuildWorkersCount),
shard.WithMode(shCfg.mode),
shard.WithBlobStorOptions(blobstoreOpts...),
shard.WithMetaBaseOptions(mbOptions...),
diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go
index baa4e3c9d..c944d1c58 100644
--- a/cmd/frostfs-node/config/engine/config.go
+++ b/cmd/frostfs-node/config/engine/config.go
@@ -15,9 +15,6 @@ const (
// ShardPoolSizeDefault is a default value of routine pool size per-shard to
// process object PUT operations in a storage engine.
ShardPoolSizeDefault = 20
- // RebuildWorkersCountDefault is a default value of the workers count to
- // process storage rebuild operations in a storage engine.
- RebuildWorkersCountDefault = 100
)
// ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@@ -91,11 +88,3 @@ func ShardErrorThreshold(c *config.Config) uint32 {
func EngineLowMemoryConsumption(c *config.Config) bool {
return config.BoolSafe(c.Sub(subsection), "low_mem")
}
-
-// EngineRebuildWorkersCount returns value of "rebuild_workers_count" config parmeter from "storage" section.
-func EngineRebuildWorkersCount(c *config.Config) uint32 {
- if v := config.Uint32Safe(c.Sub(subsection), "rebuild_workers_count"); v > 0 {
- return v
- }
- return RebuildWorkersCountDefault
-}
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index d53207ccc..464d72556 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -39,7 +39,6 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
- require.EqualValues(t, engineconfig.RebuildWorkersCountDefault, engineconfig.EngineRebuildWorkersCount(empty))
})
const path = "../../../../config/example/node"
@@ -49,7 +48,6 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))
- require.EqualValues(t, uint32(1000), engineconfig.EngineRebuildWorkersCount(c))
err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
defer func() {
@@ -121,6 +119,7 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, sc.RefillMetabase())
require.Equal(t, mode.ReadOnly, sc.Mode())
require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
+ require.Equal(t, uint32(1000), sc.RebuildWorkerCount())
case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
require.Equal(t, fs.FileMode(0o644), pl.Perm())
@@ -176,6 +175,7 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, sc.RefillMetabase())
require.Equal(t, mode.ReadWrite, sc.Mode())
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
+ require.Equal(t, uint32(shardconfig.RebuildWorkersCountDefault), sc.RebuildWorkerCount())
}
return nil
})
diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go
index 0620c9f63..ec9df0e89 100644
--- a/cmd/frostfs-node/config/engine/shard/config.go
+++ b/cmd/frostfs-node/config/engine/shard/config.go
@@ -19,6 +19,7 @@ const (
SmallSizeLimitDefault = 1 << 20
EstimateCompressibilityThresholdDefault = 0.1
RefillMetabaseWorkersCountDefault = 500
+ RebuildWorkersCountDefault = 5
)
// From wraps config section into Config.
@@ -149,6 +150,20 @@ func (x *Config) RefillMetabaseWorkersCount() int {
return RefillMetabaseWorkersCountDefault
}
+// RebuildWorkerCount returns the value of the "rebuild_worker_count" config parameter.
+//
+// Returns RebuildWorkersCountDefault if the value is not a positive number.
+func (x *Config) RebuildWorkerCount() uint32 {
+ v := config.Uint32Safe(
+ (*config.Config)(x),
+ "rebuild_worker_count",
+ )
+ if v > 0 {
+ return v
+ }
+ return RebuildWorkersCountDefault
+}
+
// Mode return the value of "mode" config parameter.
//
// Panics if read the value is not one of predefined
diff --git a/config/example/node.env b/config/example/node.env
index b39423ffb..1eccd8a5d 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -90,11 +90,11 @@ FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
# Storage engine section
FROSTFS_STORAGE_SHARD_POOL_SIZE=15
FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
-FROSTFS_STORAGE_REBUILD_WORKERS_COUNT=1000
## 0 shard
### Flag to refill Metabase from BlobStor
FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE=false
FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE_WORKER_COUNT=100
+FROSTFS_STORAGE_SHARD_0_REBUILD_WORKER_COUNT=1000
### Flag to set shard mode
FROSTFS_STORAGE_SHARD_0_MODE=read-only
### Write cache config
diff --git a/config/example/node.json b/config/example/node.json
index fe2de0e01..be7ced77a 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -135,12 +135,12 @@
"storage": {
"shard_pool_size": 15,
"shard_ro_error_threshold": 100,
- "rebuild_workers_count": 1000,
"shard": {
"0": {
"mode": "read-only",
"resync_metabase": false,
"resync_metabase_worker_count": 100,
+ "rebuild_worker_count": 1000,
"writecache": {
"enabled": false,
"no_sync": true,
diff --git a/config/example/node.yaml b/config/example/node.yaml
index cc339a427..4b9720655 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -117,7 +117,6 @@ storage:
# note: shard configuration can be omitted for relay node (see `node.relay`)
shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors)
- rebuild_workers_count: 1000 # count of rebuild storage concurrent workers
shard:
default: # section with the default shard parameters
@@ -165,6 +164,7 @@ storage:
# disabled (do not work with the shard, allows to not remove it from the config)
resync_metabase: false # sync metabase with blobstor on start, expensive, leave false until complete understanding
resync_metabase_worker_count: 100
+ rebuild_worker_count: 1000 # count of rebuild storage concurrent workers
writecache:
enabled: false
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index 5bf35cd65..f390d84a4 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -189,6 +189,7 @@ The following table describes configuration for each shard.
| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
+| `rebuild_worker_count` | `int` | `5` | Count of concurrent workers to rebuild blobstore. |
| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index b87d77e6c..5e883a641 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -40,7 +40,6 @@ type StorageEngine struct {
err error
}
evacuateLimiter *evacuationLimiter
- rebuildLimiter *rebuildLimiter
}
type shardWrapper struct {
@@ -213,16 +212,13 @@ type cfg struct {
lowMem bool
- rebuildWorkersCount uint32
-
containerSource atomic.Pointer[containerSource]
}
func defaultCfg() *cfg {
res := &cfg{
- log: &logger.Logger{Logger: zap.L()},
- shardPoolSize: 20,
- rebuildWorkersCount: 100,
+ log: &logger.Logger{Logger: zap.L()},
+ shardPoolSize: 20,
}
res.containerSource.Store(&containerSource{})
return res
@@ -243,7 +239,6 @@ func New(opts ...Option) *StorageEngine {
closeCh: make(chan struct{}),
setModeCh: make(chan setModeRequest),
evacuateLimiter: &evacuationLimiter{},
- rebuildLimiter: newRebuildLimiter(c.rebuildWorkersCount),
}
}
@@ -282,13 +277,6 @@ func WithLowMemoryConsumption(lowMemCons bool) Option {
}
}
-// WithRebuildWorkersCount returns an option to set the count of concurrent rebuild workers.
-func WithRebuildWorkersCount(count uint32) Option {
- return func(c *cfg) {
- c.rebuildWorkersCount = count
- }
-}
-
// SetContainerSource sets container source.
func (e *StorageEngine) SetContainerSource(cs container.Source) {
e.containerSource.Store(&containerSource{cs: cs})
diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go
index 3970aae89..83c6a54ed 100644
--- a/pkg/local_object_storage/engine/rebuild.go
+++ b/pkg/local_object_storage/engine/rebuild.go
@@ -41,7 +41,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes
}
resGuard := &sync.Mutex{}
- limiter := newRebuildLimiter(prm.ConcurrencyLimit)
+ limiter := shard.NewRebuildLimiter(prm.ConcurrencyLimit)
eg, egCtx := errgroup.WithContext(ctx)
for _, shardID := range prm.ShardIDs {
diff --git a/pkg/local_object_storage/engine/rebuild_limiter.go b/pkg/local_object_storage/engine/rebuild_limiter.go
deleted file mode 100644
index 28b02b0a3..000000000
--- a/pkg/local_object_storage/engine/rebuild_limiter.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package engine
-
-import "context"
-
-type rebuildLimiter struct {
- semaphore chan struct{}
-}
-
-func newRebuildLimiter(workersCount uint32) *rebuildLimiter {
- return &rebuildLimiter{
- semaphore: make(chan struct{}, workersCount),
- }
-}
-
-func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error {
- select {
- case l.semaphore <- struct{}{}:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- }
-}
-
-func (l *rebuildLimiter) ReleaseWorkSlot() {
- <-l.semaphore
-}
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 40584149e..2ad6859e4 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -137,7 +137,6 @@ func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*sh
shard.WithExpiredLocksCallback(e.processExpiredLocks),
shard.WithDeletedLockCallback(e.processDeletedLocks),
shard.WithReportErrorFunc(e.reportShardErrorBackground),
- shard.WithRebuildWorkerLimiter(e.rebuildLimiter),
shard.WithZeroSizeCallback(e.processZeroSizeContainers),
shard.WithZeroCountCallback(e.processZeroCountContainers),
)...)
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 6efe4ec37..5e9639a7b 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -129,7 +129,7 @@ func (s *Shard) Init(ctx context.Context) error {
s.gc.init(ctx)
- s.rb = newRebuilder(s.rebuildLimiter)
+ s.rb = newRebuilder(NewRebuildLimiter(s.rebuildWorkersCount))
if !m.NoMetabase() && !s.rebuildDisabled {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index f8051999e..2eef456be 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -26,7 +26,7 @@ type rebuildLimiter struct {
semaphore chan struct{}
}
-func newRebuildLimiter(workersCount uint32) *rebuildLimiter {
+func NewRebuildLimiter(workersCount uint32) RebuildWorkerLimiter {
return &rebuildLimiter{
semaphore: make(chan struct{}, workersCount),
}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 1eaee8815..1e2bb7900 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -140,7 +140,7 @@ type cfg struct {
reportErrorFunc func(selfID string, message string, err error)
- rebuildLimiter RebuildWorkerLimiter
+ rebuildWorkersCount uint32
rebuildDisabled bool
}
@@ -151,7 +151,7 @@ func defaultCfg() *cfg {
log: &logger.Logger{Logger: zap.L()},
gcCfg: defaultGCCfg(),
reportErrorFunc: func(string, string, error) {},
- rebuildLimiter: newRebuildLimiter(1),
+ rebuildWorkersCount: 1,
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
}
@@ -384,11 +384,11 @@ func WithExpiredCollectorWorkerCount(count int) Option {
}
}
-// WithRebuildWorkerLimiter return option to set concurrent
+// WithRebuildWorkersCount return option to set concurrent
// workers count of storage rebuild operation.
-func WithRebuildWorkerLimiter(l RebuildWorkerLimiter) Option {
+func WithRebuildWorkersCount(count uint32) Option {
return func(c *cfg) {
- c.rebuildLimiter = l
+ c.rebuildWorkersCount = count
}
}
From edb1747af7765fe685d9d4736626c409fbde7c79 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Sun, 1 Sep 2024 12:29:02 +0300
Subject: [PATCH 086/705] [#1337] blobovniczatree: Add rebuild by overflow
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/blobovniczatree/rebuild.go | 11 ++-
.../blobstor/blobovniczatree/rebuild_test.go | 74 +++++++++++++++++++
2 files changed, 82 insertions(+), 3 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index b7f20822e..b7b1dfd4b 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -172,7 +172,7 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
continue
}
path := filepath.Join(lvlPath, e.Name())
- resettlementRequired, err := b.fillPercentIsLow(path, target)
+ resettlementRequired, err := b.rebuildBySize(path, target)
if err != nil {
return false, err
}
@@ -187,14 +187,19 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
return result, nil
}
-func (b *Blobovniczas) fillPercentIsLow(path string, target int) (bool, error) {
+func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool, error) {
shDB := b.getBlobovnicza(path)
blz, err := shDB.Open()
if err != nil {
return false, err
}
defer shDB.Close()
- return blz.FillPercent() < target, nil
+ fp := blz.FillPercent()
+ // accepted fill percent defines as
+ // |----|+++++++++++++++++|+++++++++++++++++|---------------
+ // 0% target 100% 100+(100 - target)
+ // where `+` - accepted fill percent, `-` - not accepted fill percent
+ return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil
}
func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index 62ae9ea90..e6da1c553 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -228,6 +228,80 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, b.Close())
})
+
+ t.Run("rebuild by overflow", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+ obj := blobstortest.NewObject(64 * 1024)
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ }
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ require.NoError(t, b.Close())
+ b = NewBlobovniczaTree(
+ context.Background(),
+ WithLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1),
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(50*1024), // 50 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ Action: common.RebuildAction{
+ SchemaChange: false,
+ FillPercent: true,
+ FillPercentValue: 80,
+ },
+ })
+ require.NoError(t, err)
+ require.Equal(t, uint64(49), rRes.FilesRemoved)
+ require.Equal(t, uint64(98), rRes.ObjectsMoved) // 49 DBs with 2 objects
+ require.Equal(t, uint64(98), metaStub.updatedCount)
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close())
+ })
}
func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
From d3b209c8e19edfdeb13035e1d8f4b4815cf77f08 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 4 Sep 2024 09:30:56 +0300
Subject: [PATCH 087/705] [#1337] shard: Disable background rebuild
Since `frostfs-cli control shards rebuild` command was added,
there is no need for background rebuild now.
For failover tests, the value 1 is used to rebuild only on schema change.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 4 --
cmd/frostfs-node/config/engine/config_test.go | 2 -
.../config/engine/shard/config.go | 15 -----
config/example/node.env | 1 -
config/example/node.json | 1 -
config/example/node.yaml | 1 -
docs/storage-node-configuration.md | 1 -
.../blobstor/blobovniczatree/rebuild.go | 31 ++++------
.../blobovniczatree/rebuild_failover_test.go | 3 +-
.../blobstor/blobovniczatree/rebuild_test.go | 28 ++-------
.../blobstor/common/rebuild.go | 9 +--
pkg/local_object_storage/blobstor/rebuild.go | 4 +-
pkg/local_object_storage/shard/control.go | 6 +-
pkg/local_object_storage/shard/gc_test.go | 2 +-
pkg/local_object_storage/shard/rebuild.go | 58 +++++++------------
pkg/local_object_storage/shard/shard.go | 21 -------
16 files changed, 49 insertions(+), 138 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index b59518d14..16f49a082 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -126,8 +126,6 @@ type shardCfg struct {
refillMetabaseWorkersCount int
mode shardmode.Mode
- rebuildWorkersCount uint32
-
metaCfg struct {
path string
perm fs.FileMode
@@ -240,7 +238,6 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig
newConfig.refillMetabase = oldConfig.RefillMetabase()
newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
- newConfig.rebuildWorkersCount = oldConfig.RebuildWorkerCount()
newConfig.mode = oldConfig.Mode()
newConfig.compress = oldConfig.Compress()
newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
@@ -998,7 +995,6 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
shard.WithLogger(c.log),
shard.WithRefillMetabase(shCfg.refillMetabase),
shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount),
- shard.WithRebuildWorkersCount(shCfg.rebuildWorkersCount),
shard.WithMode(shCfg.mode),
shard.WithBlobStorOptions(blobstoreOpts...),
shard.WithMetaBaseOptions(mbOptions...),
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index 464d72556..ef6bf7f74 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -119,7 +119,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, sc.RefillMetabase())
require.Equal(t, mode.ReadOnly, sc.Mode())
require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
- require.Equal(t, uint32(1000), sc.RebuildWorkerCount())
case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
require.Equal(t, fs.FileMode(0o644), pl.Perm())
@@ -175,7 +174,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, sc.RefillMetabase())
require.Equal(t, mode.ReadWrite, sc.Mode())
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
- require.Equal(t, uint32(shardconfig.RebuildWorkersCountDefault), sc.RebuildWorkerCount())
}
return nil
})
diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go
index ec9df0e89..0620c9f63 100644
--- a/cmd/frostfs-node/config/engine/shard/config.go
+++ b/cmd/frostfs-node/config/engine/shard/config.go
@@ -19,7 +19,6 @@ const (
SmallSizeLimitDefault = 1 << 20
EstimateCompressibilityThresholdDefault = 0.1
RefillMetabaseWorkersCountDefault = 500
- RebuildWorkersCountDefault = 5
)
// From wraps config section into Config.
@@ -150,20 +149,6 @@ func (x *Config) RefillMetabaseWorkersCount() int {
return RefillMetabaseWorkersCountDefault
}
-// RebuildWorkersCount returns the value of "resync_metabase_worker_count" config parameter.
-//
-// Returns RebuildWorkersCountDefault if the value is not a positive number.
-func (x *Config) RebuildWorkerCount() uint32 {
- v := config.Uint32Safe(
- (*config.Config)(x),
- "rebuild_worker_count",
- )
- if v > 0 {
- return v
- }
- return RebuildWorkersCountDefault
-}
-
// Mode return the value of "mode" config parameter.
//
// Panics if read the value is not one of predefined
diff --git a/config/example/node.env b/config/example/node.env
index 1eccd8a5d..82553745e 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -94,7 +94,6 @@ FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
### Flag to refill Metabase from BlobStor
FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE=false
FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE_WORKER_COUNT=100
-FROSTFS_STORAGE_SHARD_0_REBUILD_WORKER_COUNT=1000
### Flag to set shard mode
FROSTFS_STORAGE_SHARD_0_MODE=read-only
### Write cache config
diff --git a/config/example/node.json b/config/example/node.json
index be7ced77a..da108c692 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -140,7 +140,6 @@
"mode": "read-only",
"resync_metabase": false,
"resync_metabase_worker_count": 100,
- "rebuild_worker_count": 1000,
"writecache": {
"enabled": false,
"no_sync": true,
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 4b9720655..a79f48226 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -164,7 +164,6 @@ storage:
# disabled (do not work with the shard, allows to not remove it from the config)
resync_metabase: false # sync metabase with blobstor on start, expensive, leave false until complete understanding
resync_metabase_worker_count: 100
- rebuild_worker_count: 1000 # count of rebuild storage concurrent workers
writecache:
enabled: false
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index f390d84a4..5bf35cd65 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -189,7 +189,6 @@ The following table describes configuration for each shard.
| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
-| `rebuild_worker_count` | `int` | `5` | Count of concurrent workers to rebuild blobstore. |
| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index b7b1dfd4b..202d38cd7 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -60,7 +60,7 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild)
- dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.Action)
+ dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent)
if err != nil {
b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
success = false
@@ -94,27 +94,20 @@ func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.
return res, nil
}
-func (b *Blobovniczas) getDBsToRebuild(ctx context.Context, action common.RebuildAction) ([]string, error) {
- schemaChange := make(map[string]struct{})
- fillPercent := make(map[string]struct{})
- var err error
- if action.SchemaChange {
- schemaChange, err = b.selectDBsDoNotMatchSchema(ctx)
- if err != nil {
- return nil, err
- }
+func (b *Blobovniczas) getDBsToRebuild(ctx context.Context, fillPercent int) ([]string, error) {
+ withSchemaChange, err := b.selectDBsDoNotMatchSchema(ctx)
+ if err != nil {
+ return nil, err
}
- if action.FillPercent {
- fillPercent, err = b.selectDBsDoNotMatchFillPercent(ctx, action.FillPercentValue)
- if err != nil {
- return nil, err
- }
+ withFillPercent, err := b.selectDBsDoNotMatchFillPercent(ctx, fillPercent)
+ if err != nil {
+ return nil, err
}
- for k := range fillPercent {
- schemaChange[k] = struct{}{}
+ for k := range withFillPercent {
+ withSchemaChange[k] = struct{}{}
}
- result := make([]string, 0, len(schemaChange))
- for db := range schemaChange {
+ result := make([]string, 0, len(withSchemaChange))
+ for db := range withSchemaChange {
result = append(result, db)
}
return result, nil
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
index 9fec795ca..b177d20fc 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
@@ -145,7 +145,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
WithRootPath(dir),
- WithBlobovniczaSize(100*1024*1024),
+ WithBlobovniczaSize(10*1024),
WithWaitBeforeDropDB(0),
WithOpenedCacheSize(1000))
require.NoError(t, b.Open(mode.ComponentReadWrite))
@@ -164,6 +164,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
MetaStorage: metaStub,
WorkerLimiter: &rebuildLimiterStub{},
+ FillPercent: 1,
})
require.NoError(t, err)
require.Equal(t, uint64(1), rRes.ObjectsMoved)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index e6da1c553..dfd928aaf 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -79,11 +79,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
MetaStorage: metaStub,
WorkerLimiter: &rebuildLimiterStub{},
- Action: common.RebuildAction{
- SchemaChange: false,
- FillPercent: true,
- FillPercentValue: 60,
- },
+ FillPercent: 60,
})
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -135,11 +131,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
MetaStorage: metaStub,
WorkerLimiter: &rebuildLimiterStub{},
- Action: common.RebuildAction{
- SchemaChange: false,
- FillPercent: true,
- FillPercentValue: 90, // 64KB / 100KB = 64%
- },
+ FillPercent: 90, // 64KB / 100KB = 64%
})
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -204,11 +196,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
MetaStorage: metaStub,
WorkerLimiter: &rebuildLimiterStub{},
- Action: common.RebuildAction{
- SchemaChange: false,
- FillPercent: true,
- FillPercentValue: 80,
- },
+ FillPercent: 80,
})
require.NoError(t, err)
require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -281,11 +269,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
MetaStorage: metaStub,
WorkerLimiter: &rebuildLimiterStub{},
- Action: common.RebuildAction{
- SchemaChange: false,
- FillPercent: true,
- FillPercentValue: 80,
- },
+ FillPercent: 80,
})
require.NoError(t, err)
require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -357,7 +341,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
rPrm.WorkerLimiter = &rebuildLimiterStub{}
- rPrm.Action = common.RebuildAction{SchemaChange: true}
+ rPrm.FillPercent = 1
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -446,7 +430,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
rPrm.WorkerLimiter = &rebuildLimiterStub{}
- rPrm.Action = common.RebuildAction{SchemaChange: true}
+ rPrm.FillPercent = 1
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go
index 020d9d022..19e181ee7 100644
--- a/pkg/local_object_storage/blobstor/common/rebuild.go
+++ b/pkg/local_object_storage/blobstor/common/rebuild.go
@@ -11,17 +11,10 @@ type RebuildRes struct {
FilesRemoved uint64
}
-type RebuildAction struct {
- SchemaChange bool
-
- FillPercent bool
- FillPercentValue int
-}
-
type RebuildPrm struct {
MetaStorage MetaStorage
WorkerLimiter ConcurrentWorkersLimiter
- Action RebuildAction
+ FillPercent int
}
type MetaStorage interface {
diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go
index 31bc2d167..7b2786ba2 100644
--- a/pkg/local_object_storage/blobstor/rebuild.go
+++ b/pkg/local_object_storage/blobstor/rebuild.go
@@ -18,14 +18,14 @@ type ConcurrentWorkersLimiter interface {
ReleaseWorkSlot()
}
-func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, action common.RebuildAction) error {
+func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, fillPercent int) error {
var summary common.RebuildRes
var rErr error
for _, storage := range b.storage {
res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{
MetaStorage: upd,
WorkerLimiter: limiter,
- Action: action,
+ FillPercent: fillPercent,
})
summary.FilesRemoved += res.FilesRemoved
summary.ObjectsMoved += res.ObjectsMoved
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 5e9639a7b..de881654a 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -129,8 +129,8 @@ func (s *Shard) Init(ctx context.Context) error {
s.gc.init(ctx)
- s.rb = newRebuilder(NewRebuildLimiter(s.rebuildWorkersCount))
- if !m.NoMetabase() && !s.rebuildDisabled {
+ s.rb = newRebuilder()
+ if !m.NoMetabase() {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}
s.writecacheSealCancel.Store(dummyCancel)
@@ -398,7 +398,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
defer unlock()
s.rb.Stop(s.log)
- if !s.info.Mode.NoMetabase() && !s.rebuildDisabled {
+ if !s.info.Mode.NoMetabase() {
defer func() {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}()
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index 1c0ef1c2e..90958cd35 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -187,7 +187,7 @@ func TestGCDropsObjectInhumedFromWritecache(t *testing.T) {
func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool) {
sh := newCustomShard(t, true, shardOptions{
- additionalShardOptions: []Option{WithDisabledGC(), WithDisabledRebuild()},
+ additionalShardOptions: []Option{WithDisabledGC()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
defer func() { require.NoError(t, sh.Close()) }()
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index 2eef456be..0d83caa0c 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -17,6 +16,8 @@ import (
"go.uber.org/zap"
)
+var ErrRebuildInProgress = errors.New("shard rebuild in progress")
+
type RebuildWorkerLimiter interface {
AcquireWorkSlot(ctx context.Context) error
ReleaseWorkSlot()
@@ -46,25 +47,23 @@ func (l *rebuildLimiter) ReleaseWorkSlot() {
}
type rebuildTask struct {
- limiter RebuildWorkerLimiter
- action common.RebuildAction
+ limiter RebuildWorkerLimiter
+ fillPercent int
}
type rebuilder struct {
- mtx *sync.Mutex
- wg *sync.WaitGroup
- cancel func()
- limiter RebuildWorkerLimiter
- done chan struct{}
- tasks chan rebuildTask
+ mtx *sync.Mutex
+ wg *sync.WaitGroup
+ cancel func()
+ done chan struct{}
+ tasks chan rebuildTask
}
-func newRebuilder(l RebuildWorkerLimiter) *rebuilder {
+func newRebuilder() *rebuilder {
return &rebuilder{
- mtx: &sync.Mutex{},
- wg: &sync.WaitGroup{},
- limiter: l,
- tasks: make(chan rebuildTask, 10),
+ mtx: &sync.Mutex{},
+ wg: &sync.WaitGroup{},
+ tasks: make(chan rebuildTask),
}
}
@@ -89,25 +88,14 @@ func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.D
if !ok {
continue
}
- runRebuild(ctx, bs, mb, log, t.action, t.limiter)
+ runRebuild(ctx, bs, mb, log, t.fillPercent, t.limiter)
}
}
}()
- select {
- case <-ctx.Done():
- return
- case r.tasks <- rebuildTask{
- limiter: r.limiter,
- action: common.RebuildAction{
- SchemaChange: true,
- },
- }:
- return
- }
}
func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger,
- action common.RebuildAction, limiter RebuildWorkerLimiter,
+ fillPercent int, limiter RebuildWorkerLimiter,
) {
select {
case <-ctx.Done():
@@ -115,23 +103,25 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo
default:
}
log.Info(logs.BlobstoreRebuildStarted)
- if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, action); err != nil {
+ if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil {
log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
} else {
log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
}
}
-func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, action common.RebuildAction,
+func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, fillPercent int,
) error {
select {
case <-ctx.Done():
return ctx.Err()
case r.tasks <- rebuildTask{
- limiter: limiter,
- action: action,
+ limiter: limiter,
+ fillPercent: fillPercent,
}:
return nil
+ default:
+ return ErrRebuildInProgress
}
}
@@ -198,9 +188,5 @@ func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error {
return ErrDegradedMode
}
- return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, common.RebuildAction{
- SchemaChange: true,
- FillPercent: true,
- FillPercentValue: int(p.TargetFillPercent),
- })
+ return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, int(p.TargetFillPercent))
}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 1e2bb7900..7496fc352 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -139,10 +139,6 @@ type cfg struct {
metricsWriter MetricsWriter
reportErrorFunc func(selfID string, message string, err error)
-
- rebuildWorkersCount uint32
-
- rebuildDisabled bool
}
func defaultCfg() *cfg {
@@ -151,7 +147,6 @@ func defaultCfg() *cfg {
log: &logger.Logger{Logger: zap.L()},
gcCfg: defaultGCCfg(),
reportErrorFunc: func(string, string, error) {},
- rebuildWorkersCount: 1,
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
}
@@ -384,14 +379,6 @@ func WithExpiredCollectorWorkerCount(count int) Option {
}
}
-// WithRebuildWorkersCount return option to set concurrent
-// workers count of storage rebuild operation.
-func WithRebuildWorkersCount(count uint32) Option {
- return func(c *cfg) {
- c.rebuildWorkersCount = count
- }
-}
-
// WithDisabledGC disables GC.
// For testing purposes only.
func WithDisabledGC() Option {
@@ -414,14 +401,6 @@ func WithZeroCountCallback(cb EmptyContainersCallback) Option {
}
}
-// WithDisabledRebuild returns an option to disable a shard rebuild.
-// For testing purposes only.
-func WithDisabledRebuild() Option {
- return func(c *cfg) {
- c.rebuildDisabled = true
- }
-}
-
func (s *Shard) fillInfo() {
s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
From 654d970fadfe7eb47a5ce566be81ad15119df6a0 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 6 Sep 2024 11:16:12 +0300
Subject: [PATCH 088/705] [#1355] adm: Run `metabase upgrade` concurrently
Signed-off-by: Dmitrii Stepanov
---
.../internal/modules/metabase/upgrade.go | 33 +++++++++++++------
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
index 83e085df4..96cb62f10 100644
--- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
+++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
@@ -3,6 +3,7 @@ package metabase
import (
"errors"
"fmt"
+ "sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
@@ -11,6 +12,7 @@ import (
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"github.com/spf13/cobra"
+ "golang.org/x/sync/errgroup"
)
const (
@@ -57,17 +59,28 @@ func upgrade(cmd *cobra.Command, _ []string) error {
cmd.Println(i+1, ":", path)
}
result := make(map[string]bool)
+ var resultGuard sync.Mutex
+ eg, ctx := errgroup.WithContext(cmd.Context())
for _, path := range paths {
- cmd.Println("upgrading metabase", path, "...")
- if err := meta.Upgrade(cmd.Context(), path, !noCompact, func(a ...any) {
- cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
- }); err != nil {
- result[path] = false
- cmd.Println("error: failed to upgrade metabase", path, ":", err)
- } else {
- result[path] = true
- cmd.Println("metabase", path, "upgraded successfully")
- }
+ eg.Go(func() error {
+ var success bool
+ cmd.Println("upgrading metabase", path, "...")
+ if err := meta.Upgrade(ctx, path, !noCompact, func(a ...any) {
+ cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
+ }); err != nil {
+ cmd.Println("error: failed to upgrade metabase", path, ":", err)
+ } else {
+ success = true
+ cmd.Println("metabase", path, "upgraded successfully")
+ }
+ resultGuard.Lock()
+ result[path] = success
+ resultGuard.Unlock()
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return err
}
for mb, ok := range result {
if ok {
From 4668efc0bfdfac3750307d0863effb991baa1a4b Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 6 Sep 2024 11:17:02 +0300
Subject: [PATCH 089/705] [#1355] metabase: Upgrade improvements
Do not fail on same latest version to run compact on upgraded metabase.
Use NoSync on compact.
Log every batch on bucket delete stage.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/upgrade.go | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index e9abd746c..f677dcf8e 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -27,6 +27,10 @@ const (
var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, log func(a ...any)) error{
2: upgradeFromV2ToV3,
+ 3: func(_ context.Context, _ *bbolt.DB, log func(a ...any)) error {
+ log("metabase already upgraded")
+ return nil
+ },
}
func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any)) error {
@@ -86,6 +90,7 @@ func compactDB(db *bbolt.DB) error {
}
dst, err := bbolt.Open(tmpFileName, f.Mode(), &bbolt.Options{
Timeout: 100 * time.Millisecond,
+ NoSync: true,
})
if err != nil {
return fmt.Errorf("can't open new metabase to compact: %w", err)
@@ -93,6 +98,9 @@ func compactDB(db *bbolt.DB) error {
if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
}
+ if err := dst.Sync(); err != nil {
+ return fmt.Errorf("sync compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
if err := dst.Close(); err != nil {
return fmt.Errorf("close compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
}
@@ -369,8 +377,7 @@ func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log f
log("deleting buckets completed with an error:", err)
return err
}
- if count += uint64(len(keys)); count%upgradeLogFrequency == 0 {
- log("deleted", count, "buckets")
- }
+ count += uint64(len(keys))
+ log("deleted", count, "buckets")
}
}
From 92fe5d90f50a4d0f3c3b5265a32e0127848559e4 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 9 Sep 2024 18:39:22 +0300
Subject: [PATCH 090/705] [#1359] writecache: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/writecache/cachebbolt.go | 8 --------
pkg/local_object_storage/writecache/put.go | 5 -----
2 files changed, 13 deletions(-)
diff --git a/pkg/local_object_storage/writecache/cachebbolt.go b/pkg/local_object_storage/writecache/cachebbolt.go
index cdd4ed442..f1e6a619a 100644
--- a/pkg/local_object_storage/writecache/cachebbolt.go
+++ b/pkg/local_object_storage/writecache/cachebbolt.go
@@ -18,16 +18,9 @@ import (
type cache struct {
options
- // mtx protects statistics, counters and compressFlags.
- mtx sync.RWMutex
-
mode mode.Mode
modeMtx sync.RWMutex
- // compressFlags maps address of a big object to boolean value indicating
- // whether object should be compressed.
- compressFlags map[string]struct{}
-
// flushCh is a channel with objects to flush.
flushCh chan objectInfo
// cancel is cancel function, protected by modeMtx in Close.
@@ -66,7 +59,6 @@ func New(opts ...Option) Cache {
flushCh: make(chan objectInfo),
mode: mode.Disabled,
- compressFlags: make(map[string]struct{}),
options: options{
log: &logger.Logger{Logger: zap.NewNop()},
maxObjectSize: defaultMaxObjectSize,
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index 150399de8..ae0e8b77a 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -115,11 +115,6 @@ func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) erro
return err
}
- if compressor := c.blobstor.Compressor(); compressor != nil && compressor.NeedsCompression(prm.Object) {
- c.mtx.Lock()
- c.compressFlags[addr] = struct{}{}
- c.mtx.Unlock()
- }
storagelog.Write(c.log,
storagelog.AddressField(addr),
storagelog.StorageTypeField(wcStorageType),
From a812932984531162648fdbfa985a6f496fdbd80e Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Tue, 10 Sep 2024 11:15:30 +0300
Subject: [PATCH 091/705] [#1362] ape: Move common APE check logic to separate
package
* Tree and object service have the same logic for checking APE. So,
this check should be moved to a common package.
Signed-off-by: Airat Arifullin
---
pkg/services/common/ape/checker.go | 167 +++++++++++++++++++++++++++++
pkg/services/object/ape/checker.go | 139 +++++-------------------
pkg/services/tree/ape.go | 116 ++------------------
pkg/services/tree/service.go | 5 +
4 files changed, 205 insertions(+), 222 deletions(-)
create mode 100644 pkg/services/common/ape/checker.go
diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go
new file mode 100644
index 000000000..f24d22124
--- /dev/null
+++ b/pkg/services/common/ape/checker.go
@@ -0,0 +1,167 @@
+package ape
+
+import (
+ "crypto/ecdsa"
+ "errors"
+ "fmt"
+
+ aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router"
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+var (
+ errInvalidTargetType = errors.New("bearer token defines non-container target override")
+ errBearerExpired = errors.New("bearer token has expired")
+ errBearerInvalidSignature = errors.New("bearer token has invalid signature")
+ errBearerInvalidContainerID = errors.New("bearer token was created for another container")
+ errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
+ errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
+)
+
+type CheckPrm struct {
+ // Request is an APE-request that is checked by policy engine.
+ Request aperequest.Request
+
+ Namespace string
+
+ Container cid.ID
+
+ // An encoded container's owner user ID.
+ ContainerOwner user.ID
+
+ // PublicKey is public key of the request sender.
+ PublicKey *keys.PublicKey
+
+ // The request's bearer token. It is used in order to check APE overrides with the token.
+ BearerToken *bearer.Token
+
+ // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
+ SoftAPECheck bool
+}
+
+// CheckCore provides methods to perform the common logic of APE check.
+type CheckCore interface {
+ // CheckAPE performs the common policy-engine check logic on a prepared request.
+ CheckAPE(prm CheckPrm) error
+}
+
+type checkerCoreImpl struct {
+ LocalOverrideStorage policyengine.LocalOverrideStorage
+ MorphChainStorage policyengine.MorphRuleChainStorageReader
+ FrostFSSubjectProvider frostfsidcore.SubjectProvider
+ State netmap.State
+}
+
+func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader,
+ frostFSSubjectProvider frostfsidcore.SubjectProvider, state netmap.State) CheckCore {
+ return &checkerCoreImpl{
+ LocalOverrideStorage: localOverrideStorage,
+ MorphChainStorage: morphChainStorage,
+ FrostFSSubjectProvider: frostFSSubjectProvider,
+ State: state,
+ }
+}
+
+// CheckAPE performs the common policy-engine check logic on a prepared request.
+func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error {
+ var cr policyengine.ChainRouter
+ if prm.BearerToken != nil && !prm.BearerToken.Impersonate() {
+ var err error
+ if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil {
+ return fmt.Errorf("bearer validation error: %w", err)
+ }
+ cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride())
+ if err != nil {
+ return fmt.Errorf("create chain router error: %w", err)
+ }
+ } else {
+ cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
+ }
+
+ groups, err := aperequest.Groups(c.FrostFSSubjectProvider, prm.PublicKey)
+ if err != nil {
+ return fmt.Errorf("failed to get group ids: %w", err)
+ }
+
+ // Policy contract keeps group related chains as namespace-group pair.
+ for i := range groups {
+ groups[i] = fmt.Sprintf("%s:%s", prm.Namespace, groups[i])
+ }
+
+ rt := policyengine.NewRequestTargetExtended(prm.Namespace, prm.Container.EncodeToString(), fmt.Sprintf("%s:%s", prm.Namespace, prm.PublicKey.Address()), groups)
+ status, found, err := cr.IsAllowed(apechain.Ingress, rt, prm.Request)
+ if err != nil {
+ return err
+ }
+ if !found && prm.SoftAPECheck || status == apechain.Allow {
+ return nil
+ }
+ err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", prm.Request.Operation(), status.String())
+ return apeErr(err)
+}
+
+func apeErr(err error) error {
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(err.Error())
+ return errAccessDenied
+}
+
+// isValidBearer checks whether bearer token was correctly signed by authorized
+// entity. This method might be defined on whole ACL service because it will
+// require fetching current epoch to check lifetime.
+func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKey *keys.PublicKey, st netmap.State) error {
+ if token == nil {
+ return nil
+ }
+
+ // First check token lifetime. Simplest verification.
+ if token.InvalidAt(st.CurrentEpoch()) {
+ return errBearerExpired
+ }
+
+ // Then check if bearer token is signed correctly.
+ if !token.VerifySignature() {
+ return errBearerInvalidSignature
+ }
+
+ // Check for ape overrides defined in the bearer token.
+ apeOverride := token.APEOverride()
+ if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer {
+ return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String())
+ }
+
+ // Then check if container is either empty or equal to the container in the request.
+ var targetCnr cid.ID
+ err := targetCnr.DecodeString(apeOverride.Target.Name)
+ if err != nil {
+ return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
+ }
+ if !cntID.Equals(targetCnr) {
+ return errBearerInvalidContainerID
+ }
+
+ // Then check if container owner signed this token.
+ if !bearer.ResolveIssuer(*token).Equals(ownerCnr) {
+ return errBearerNotSignedByOwner
+ }
+
+ // Then check if request sender has rights to use this token.
+ var usrSender user.ID
+ user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
+
+ if !token.AssertUser(usrSender) {
+ return errBearerInvalidOwner
+ }
+
+ return nil
+}
diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go
index a1972292e..3688638d0 100644
--- a/pkg/services/object/ape/checker.go
+++ b/pkg/services/object/ape/checker.go
@@ -2,49 +2,41 @@ package ape
import (
"context"
- "crypto/ecdsa"
"errors"
"fmt"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
type checkerImpl struct {
- localOverrideStorage policyengine.LocalOverrideStorage
- morphChainStorage policyengine.MorphRuleChainStorageReader
- headerProvider HeaderProvider
- frostFSIDClient frostfsidcore.SubjectProvider
- nm netmap.Source
- st netmap.State
- cnrSource container.Source
- nodePK []byte
+ checkerCore checkercore.CheckCore
+ frostFSIDClient frostfsidcore.SubjectProvider
+ headerProvider HeaderProvider
+ nm netmap.Source
+ cnrSource container.Source
+ nodePK []byte
}
func NewChecker(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader, headerProvider HeaderProvider, frostFSIDClient frostfsidcore.SubjectProvider, nm netmap.Source, st netmap.State, cnrSource container.Source, nodePK []byte) Checker {
return &checkerImpl{
- localOverrideStorage: localOverrideStorage,
- morphChainStorage: morphChainStorage,
- headerProvider: headerProvider,
- frostFSIDClient: frostFSIDClient,
- nm: nm,
- st: st,
- cnrSource: cnrSource,
- nodePK: nodePK,
+ checkerCore: checkercore.New(localOverrideStorage, morphChainStorage, frostFSIDClient, st),
+ frostFSIDClient: frostFSIDClient,
+ headerProvider: headerProvider,
+ nm: nm,
+ cnrSource: cnrSource,
+ nodePK: nodePK,
}
}
@@ -85,68 +77,9 @@ type Prm struct {
XHeaders []session.XHeader
}
-var (
- errMissingOID = errors.New("object ID is not set")
- errInvalidTargetType = errors.New("bearer token defines non-container target override")
- errBearerExpired = errors.New("bearer token has expired")
- errBearerInvalidSignature = errors.New("bearer token has invalid signature")
- errBearerInvalidContainerID = errors.New("bearer token was created for another container")
- errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
- errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
-)
+var errMissingOID = errors.New("object ID is not set")
-// isValidBearer checks whether bearer token was correctly signed by authorized
-// entity. This method might be defined on whole ACL service because it will
-// require fetching current epoch to check lifetime.
-func isValidBearer(token *bearer.Token, ownerCnr user.ID, containerID cid.ID, publicKey *keys.PublicKey, st netmap.State) error {
- if token == nil {
- return nil
- }
-
- // First check token lifetime. Simplest verification.
- if token.InvalidAt(st.CurrentEpoch()) {
- return errBearerExpired
- }
-
- // Then check if bearer token is signed correctly.
- if !token.VerifySignature() {
- return errBearerInvalidSignature
- }
-
- // Check for ape overrides defined in the bearer token.
- apeOverride := token.APEOverride()
- if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer {
- return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String())
- }
-
- // Then check if container is either empty or equal to the container in the request.
- var targetCnr cid.ID
- err := targetCnr.DecodeString(apeOverride.Target.Name)
- if err != nil {
- return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
- }
- if !containerID.Equals(targetCnr) {
- return errBearerInvalidContainerID
- }
-
- // Then check if container owner signed this token.
- if !bearer.ResolveIssuer(*token).Equals(ownerCnr) {
- return errBearerNotSignedByOwner
- }
-
- // Then check if request sender has rights to use this token.
- var usrSender user.ID
- user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
-
- if !token.AssertUser(usrSender) {
- return errBearerInvalidOwner
- }
-
- return nil
-}
-
-// CheckAPE checks if a request or a response is permitted creating an ape request and passing
-// it to chain router.
+// CheckAPE prepares an APE-request and checks if it is permitted by policies.
func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
// APE check is ignored for some inter-node requests.
if prm.Role == nativeschema.PropertyValueContainerRoleContainer {
@@ -171,38 +104,14 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
if err != nil {
return err
}
- groups, err := aperequest.Groups(c.frostFSIDClient, pub)
- if err != nil {
- return fmt.Errorf("failed to get group ids: %w", err)
- }
- // Policy contract keeps group related chains as namespace-group pair.
- for i := range groups {
- groups[i] = fmt.Sprintf("%s:%s", prm.Namespace, groups[i])
- }
-
- var cr policyengine.ChainRouter
- if prm.BearerToken != nil && !prm.BearerToken.Impersonate() {
- if err := isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, pub, c.st); err != nil {
- return fmt.Errorf("bearer token validation error: %w", err)
- }
- cr, err = router.BearerChainFeedRouter(c.localOverrideStorage, c.morphChainStorage, prm.BearerToken.APEOverride())
- if err != nil {
- return fmt.Errorf("create chain router error: %w", err)
- }
- } else {
- cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.morphChainStorage, c.localOverrideStorage)
- }
-
- rt := policyengine.NewRequestTargetExtended(prm.Namespace, prm.Container.EncodeToString(), fmt.Sprintf("%s:%s", prm.Namespace, pub.Address()), groups)
- status, ruleFound, err := cr.IsAllowed(apechain.Ingress, rt, r)
- if err != nil {
- return err
- }
-
- if !ruleFound && prm.SoftAPECheck || status == apechain.Allow {
- return nil
- }
-
- return fmt.Errorf("method %s: %s", prm.Method, status)
+ return c.checkerCore.CheckAPE(checkercore.CheckPrm{
+ Request: r,
+ PublicKey: pub,
+ Namespace: prm.Method,
+ Container: prm.Container,
+ ContainerOwner: prm.ContainerOwner,
+ BearerToken: prm.BearerToken,
+ SoftAPECheck: prm.SoftAPECheck,
+ })
}
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
index ee4687911..693b16e60 100644
--- a/pkg/services/tree/ape.go
+++ b/pkg/services/tree/ape.go
@@ -2,42 +2,25 @@ package tree
import (
"context"
- "crypto/ecdsa"
"encoding/hex"
- "errors"
"fmt"
"net"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/converter"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"google.golang.org/grpc/peer"
)
-var (
- errInvalidTargetType = errors.New("bearer token defines non-container target override")
- errBearerExpired = errors.New("bearer token has expired")
- errBearerInvalidSignature = errors.New("bearer token has invalid signature")
- errBearerInvalidContainerID = errors.New("bearer token was created for another container")
- errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
- errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
-)
-
func (s *Service) newAPERequest(ctx context.Context, namespace string,
cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) (aperequest.Request, error) {
@@ -77,56 +60,6 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string,
), nil
}
-// isValidBearer checks whether bearer token was correctly signed by authorized
-// entity. This method might be defined on whole ACL service because it will
-// require fetching current epoch to check lifetime.
-func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKey *keys.PublicKey, st netmap.State) error {
- if token == nil {
- return nil
- }
-
- // First check token lifetime. Simplest verification.
- if token.InvalidAt(st.CurrentEpoch()) {
- return errBearerExpired
- }
-
- // Then check if bearer token is signed correctly.
- if !token.VerifySignature() {
- return errBearerInvalidSignature
- }
-
- // Check for ape overrides defined in the bearer token.
- apeOverride := token.APEOverride()
- if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer {
- return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String())
- }
-
- // Then check if container is either empty or equal to the container in the request.
- var targetCnr cid.ID
- err := targetCnr.DecodeString(apeOverride.Target.Name)
- if err != nil {
- return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
- }
- if !cntID.Equals(targetCnr) {
- return errBearerInvalidContainerID
- }
-
- // Then check if container owner signed this token.
- if !bearer.ResolveIssuer(*token).Equals(ownerCnr) {
- return errBearerNotSignedByOwner
- }
-
- // Then check if request sender has rights to use this token.
- var usrSender user.ID
- user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
-
- if !token.AssertUser(usrSender) {
- return errBearerInvalidOwner
- }
-
- return nil
-}
-
func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) error {
@@ -141,45 +74,14 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
return fmt.Errorf("failed to create ape request: %w", err)
}
- var cr engine.ChainRouter
- if bt != nil && !bt.Impersonate() {
- if err := isValidBearer(bt, container.Value.Owner(), cid, publicKey, s.state); err != nil {
- return fmt.Errorf("bearer validation error: %w", err)
- }
- cr, err = router.BearerChainFeedRouter(s.localOverrideStorage, s.morphChainStorage, bt.APEOverride())
- if err != nil {
- return fmt.Errorf("create chain router error: %w", err)
- }
- } else {
- cr = engine.NewDefaultChainRouterWithLocalOverrides(s.morphChainStorage, s.localOverrideStorage)
- }
-
- groups, err := aperequest.Groups(s.frostfsidSubjectProvider, publicKey)
- if err != nil {
- return fmt.Errorf("failed to get group ids: %w", err)
- }
-
- // Policy contract keeps group related chains as namespace-group pair.
- for i := range groups {
- groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
- }
-
- rt := engine.NewRequestTargetExtended(namespace, cid.EncodeToString(), fmt.Sprintf("%s:%s", namespace, publicKey.Address()), groups)
- status, found, err := cr.IsAllowed(apechain.Ingress, rt, request)
- if err != nil {
- return err
- }
- if found && status == apechain.Allow {
- return nil
- }
- err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", request.Operation(), status.String())
- return apeErr(err)
-}
-
-func apeErr(err error) error {
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(err.Error())
- return errAccessDenied
+ return s.apeChecker.CheckAPE(checkercore.CheckPrm{
+ Request: request,
+ Namespace: namespace,
+ Container: cid,
+ PublicKey: publicKey,
+ BearerToken: bt,
+ SoftAPECheck: false,
+ })
}
// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 4da61617f..875e47ecb 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -10,6 +10,7 @@ import (
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -37,6 +38,8 @@ type Service struct {
initialSyncDone atomic.Bool
+ apeChecker checkercore.CheckCore
+
// cnrMap contains existing (used) container IDs.
cnrMap map[cidSDK.ID]struct{}
// cnrMapMtx protects cnrMap
@@ -72,6 +75,8 @@ func New(opts ...Option) *Service {
s.syncChan = make(chan struct{})
s.syncPool, _ = ants.NewPool(defaultSyncWorkerCount)
+ s.apeChecker = checkercore.New(s.localOverrideStorage, s.morphChainStorage, s.frostfsidSubjectProvider, s.state)
+
return &s
}
From 2220f6a8091d9b861fd6a86b7afc90320591e9b1 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Tue, 10 Sep 2024 16:45:15 +0300
Subject: [PATCH 092/705] [#1365] Makefile: Fix HUB_IMAGE
Signed-off-by: Alexander Chuprov
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 71492ef17..2f29ac19c 100755
--- a/Makefile
+++ b/Makefile
@@ -4,7 +4,7 @@ SHELL = bash
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-HUB_IMAGE ?= truecloudlab/frostfs
+HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22
From 5fac4058e8cbc9ef8484b2ddaaf583574684dded Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Tue, 10 Sep 2024 12:14:46 +0300
Subject: [PATCH 093/705] [#1364] cmd/common: Add tests for CreateViper and
ReloadViper
Add tests for `CreateViper` and `ReloadViper` to ensure that no extra
files, except *.yaml, *.yml, *.json, are loaded from config directory.
Signed-off-by: Aleksey Savchuk
---
cmd/internal/common/config/viper_test.go | 107 +++++++++++++++++++++++
pkg/util/config/test/generate.go | 58 ++++++++++++
2 files changed, 165 insertions(+)
create mode 100644 cmd/internal/common/config/viper_test.go
create mode 100644 pkg/util/config/test/generate.go
diff --git a/cmd/internal/common/config/viper_test.go b/cmd/internal/common/config/viper_test.go
new file mode 100644
index 000000000..d533a15c2
--- /dev/null
+++ b/cmd/internal/common/config/viper_test.go
@@ -0,0 +1,107 @@
+package config_test
+
+import (
+ "encoding/json"
+ "os"
+ "path"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
+ configtest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config/test"
+ "github.com/spf13/viper"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v3"
+)
+
+func TestCreateReloadViper(t *testing.T) {
+ type m = map[string]any
+
+ dummyFileSize := 1 << 10
+
+ configPath := t.TempDir()
+ configFile := "000_a.yaml"
+
+ configDirPath := path.Join(configPath, "conf.d")
+ require.NoError(t, os.Mkdir(configDirPath, 0o700))
+
+ configtest.PrepareConfigFiles(t, configPath, []configtest.ConfigFile{
+ configtest.NewConfigFile(configFile, m{"a": "000"}, yaml.Marshal),
+ })
+
+ // Not valid configs, dummy files that appear lexicographically first.
+ configtest.PrepareDummyFiles(t, configDirPath, []configtest.DummyFile{
+ configtest.NewDummyFile("000_file_1", dummyFileSize),
+ configtest.NewDummyFile("000_file_2", dummyFileSize),
+ configtest.NewDummyFile("000_file_3", dummyFileSize),
+ })
+
+ configtest.PrepareConfigFiles(t, configDirPath, []configtest.ConfigFile{
+ // Valid configs with invalid extensions that appear lexicographically first.
+ configtest.NewConfigFile("001_a.yaml.un~", m{"a": "101"}, yaml.Marshal),
+ configtest.NewConfigFile("001_b.yml~", m{"b": m{"a": "102", "b": "103"}}, yaml.Marshal),
+ configtest.NewConfigFile("001_c.yaml.swp", m{"c": m{"a": "104", "b": "105"}}, yaml.Marshal),
+ configtest.NewConfigFile("001_d.json.swp", m{"d": m{"a": "106", "b": "107"}}, json.Marshal),
+
+ // Valid configs with valid extensions that should be loaded.
+ configtest.NewConfigFile("010_a.yaml", m{"a": "1"}, yaml.Marshal),
+ configtest.NewConfigFile("020_b.yml", m{"b": m{"a": "2", "b": "3"}}, yaml.Marshal),
+ configtest.NewConfigFile("030_c.json", m{"c": m{"a": "4", "b": "5"}}, json.Marshal),
+
+ // Valid configs with invalid extensions that appear lexicographically last.
+ configtest.NewConfigFile("099_a.yaml.un~", m{"a": "201"}, yaml.Marshal),
+ configtest.NewConfigFile("099_b.yml~", m{"b": m{"a": "202", "b": "203"}}, yaml.Marshal),
+ configtest.NewConfigFile("099_c.yaml.swp", m{"c": m{"a": "204", "b": "205"}}, yaml.Marshal),
+ configtest.NewConfigFile("099_c.json.swp", m{"d": m{"a": "206", "b": "207"}}, json.Marshal),
+ })
+
+ // Not valid configs, dummy files that appear lexicographically last.
+ configtest.PrepareDummyFiles(t, configDirPath, []configtest.DummyFile{
+ configtest.NewDummyFile("999_file_1", dummyFileSize),
+ configtest.NewDummyFile("999_file_2", dummyFileSize),
+ configtest.NewDummyFile("999_file_3", dummyFileSize),
+ })
+
+ finalConfig := m{"a": "1", "b": m{"a": "2", "b": "3"}, "c": m{"a": "4", "b": "5"}}
+
+ var (
+ v *viper.Viper
+ err error
+ )
+
+ t.Run("create config with config dir only", func(t *testing.T) {
+ v, err = config.CreateViper(
+ config.WithConfigDir(configDirPath),
+ )
+ require.NoError(t, err)
+ assert.Equal(t, finalConfig, v.AllSettings())
+ })
+
+ t.Run("reload config with config dir only", func(t *testing.T) {
+ err = config.ReloadViper(
+ config.WithViper(v),
+ config.WithConfigDir(configDirPath),
+ )
+ require.NoError(t, err)
+ assert.Equal(t, finalConfig, v.AllSettings())
+ })
+
+ t.Run("create config with both config and config dir", func(t *testing.T) {
+ v, err = config.CreateViper(
+ config.WithConfigFile(path.Join(configPath, configFile)),
+ config.WithConfigDir(configDirPath),
+ )
+ require.NoError(t, err)
+ assert.Equal(t, finalConfig, v.AllSettings())
+ })
+
+ t.Run("reload config with both config and config dir", func(t *testing.T) {
+ err = config.ReloadViper(
+ config.WithViper(v),
+ config.WithConfigFile(path.Join(configPath, configFile)),
+ config.WithConfigDir(configDirPath),
+ )
+ require.NoError(t, err)
+ assert.Equal(t, finalConfig, v.AllSettings())
+ })
+}
diff --git a/pkg/util/config/test/generate.go b/pkg/util/config/test/generate.go
new file mode 100644
index 000000000..63e286615
--- /dev/null
+++ b/pkg/util/config/test/generate.go
@@ -0,0 +1,58 @@
+package configtest
+
+import (
+ "crypto/rand"
+ "os"
+ "path"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+type MarshalFunc = func(any) ([]byte, error)
+
+type ConfigFile struct {
+ filename string
+ content map[string]any
+ marshal func(any) ([]byte, error)
+}
+
+type DummyFile struct {
+ filename string
+ size int
+}
+
+func NewConfigFile(filename string, content map[string]any, marshal MarshalFunc) ConfigFile {
+ return ConfigFile{
+ filename: filename,
+ content: content,
+ marshal: marshal,
+ }
+}
+
+func NewDummyFile(filename string, size int) DummyFile {
+ return DummyFile{
+ filename: filename,
+ size: size,
+ }
+}
+
+func PrepareConfigFiles(t *testing.T, dir string, files []ConfigFile) {
+ for _, file := range files {
+ data, err := file.marshal(file.content)
+ require.NoError(t, err)
+
+ err = os.WriteFile(path.Join(dir, file.filename), data, 0o600)
+ require.NoError(t, err)
+ }
+}
+
+func PrepareDummyFiles(t *testing.T, dir string, files []DummyFile) {
+ for _, file := range files {
+ data := make([]byte, file.size)
+ _, _ = rand.Read(data)
+
+ err := os.WriteFile(path.Join(dir, file.filename), data, 0o600)
+ require.NoError(t, err)
+ }
+}
From dea6f031f97664259d407bff6a320b295dc4b3d0 Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Mon, 9 Sep 2024 19:26:41 +0300
Subject: [PATCH 094/705] [#1331] cli/tree: Add order flag to `tree
get-subtree`
Added `--ordered` flag to sort output by ascending FileName.
Signed-off-by: Ekaterina Lebedeva
---
cmd/frostfs-cli/modules/tree/root.go | 1 +
cmd/frostfs-cli/modules/tree/subtree.go | 11 +++++++++++
2 files changed, 12 insertions(+)
diff --git a/cmd/frostfs-cli/modules/tree/root.go b/cmd/frostfs-cli/modules/tree/root.go
index efd1c08b5..5a53c50d6 100644
--- a/cmd/frostfs-cli/modules/tree/root.go
+++ b/cmd/frostfs-cli/modules/tree/root.go
@@ -49,6 +49,7 @@ const (
heightFlagKey = "height"
countFlagKey = "count"
depthFlagKey = "depth"
+ orderFlagKey = "ordered"
)
func initCTID(cmd *cobra.Command) {
diff --git a/cmd/frostfs-cli/modules/tree/subtree.go b/cmd/frostfs-cli/modules/tree/subtree.go
index e58a13fd6..e88ef79cb 100644
--- a/cmd/frostfs-cli/modules/tree/subtree.go
+++ b/cmd/frostfs-cli/modules/tree/subtree.go
@@ -30,6 +30,7 @@ func initGetSubtreeCmd() {
ff := getSubtreeCmd.Flags()
ff.Uint64(rootIDFlagKey, 0, "Root ID to traverse from.")
ff.Uint32(depthFlagKey, 10, "Traversal depth.")
+ ff.Bool(orderFlagKey, false, "Sort output by ascending FileName.")
_ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag)
_ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey)
@@ -59,6 +60,13 @@ func getSubTree(cmd *cobra.Command, _ []string) {
depth, _ := cmd.Flags().GetUint32(depthFlagKey)
+ order, _ := cmd.Flags().GetBool(orderFlagKey)
+
+ bodyOrder := tree.GetSubTreeRequest_Body_Order_None
+ if order {
+ bodyOrder = tree.GetSubTreeRequest_Body_Order_Asc
+ }
+
var bt []byte
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
bt = t.Marshal()
@@ -71,6 +79,9 @@ func getSubTree(cmd *cobra.Command, _ []string) {
RootId: []uint64{rid},
Depth: depth,
BearerToken: bt,
+ OrderBy: &tree.GetSubTreeRequest_Body_Order{
+ Direction: bodyOrder,
+ },
},
}
From ec8da4056704d81107f514bcb998aa6f3dd7b07f Mon Sep 17 00:00:00 2001
From: Vitaliy Potyarkin
Date: Wed, 11 Sep 2024 12:40:04 +0300
Subject: [PATCH 095/705] [#1369] Update obsolete URLs
Signed-off-by: Vitaliy Potyarkin
---
README.md | 15 +++++++--------
cmd/frostfs-adm/docs/deploy.md | 4 ++--
config/testnet/README.md | 2 +-
docs/release-instruction.md | 6 +++---
4 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/README.md b/README.md
index 8225f56c5..47d812b18 100644
--- a/README.md
+++ b/README.md
@@ -7,9 +7,8 @@
---
-[](https://goreportcard.com/report/github.com/TrueCloudLab/frostfs-node)
-
-
+[](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-node)
+
# Overview
@@ -33,8 +32,8 @@ manipulate large amounts of data without paying a prohibitive price.
FrostFS has a native [gRPC API](https://git.frostfs.info/TrueCloudLab/frostfs-api) and has
protocol gateways for popular protocols such as [AWS
-S3](https://github.com/TrueCloudLab/frostfs-s3-gw),
-[HTTP](https://github.com/TrueCloudLab/frostfs-http-gw),
+S3](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw),
+[HTTP](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw),
[FUSE](https://wikipedia.org/wiki/Filesystem_in_Userspace) and
[sFTP](https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol) allowing
developers to integrate applications without rewriting their code.
@@ -45,7 +44,7 @@ Now, we only support GNU/Linux on amd64 CPUs with AVX/AVX2 instructions. More
platforms will be officially supported after release `1.0`.
The latest version of frostfs-node works with frostfs-contract
-[v0.16.0](https://github.com/TrueCloudLab/frostfs-contract/releases/tag/v0.16.0).
+[v0.19.2](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases/tag/v0.19.2).
# Building
@@ -71,7 +70,7 @@ make docker/bin/frostfs- # build a specific binary
## Docker images
-To make docker images suitable for use in [frostfs-dev-env](https://github.com/TrueCloudLab/frostfs-dev-env/) use:
+To make docker images suitable for use in [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env/) use:
```
make images
```
@@ -125,7 +124,7 @@ the feature/topic you are going to implement.
# Credits
-FrostFS is maintained by [True Cloud Lab](https://github.com/TrueCloudLab/) with the help and
+FrostFS is maintained by [True Cloud Lab](https://git.frostfs.info/TrueCloudLab/) with the help and
contributions from community members.
Please see [CREDITS](CREDITS.md) for details.
diff --git a/cmd/frostfs-adm/docs/deploy.md b/cmd/frostfs-adm/docs/deploy.md
index 87d2e47c1..b4b1ed8e4 100644
--- a/cmd/frostfs-adm/docs/deploy.md
+++ b/cmd/frostfs-adm/docs/deploy.md
@@ -9,8 +9,8 @@ related configuration details.
To follow this guide you need:
- latest released version of [neo-go](https://github.com/nspcc-dev/neo-go/releases) (v0.97.2 at the moment),
-- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases) utility (v0.25.1 at the moment),
-- latest released version of compiled [frostfs-contract](https://github.com/TrueCloudLab/frostfs-contract/releases) (v0.11.0 at the moment).
+- latest released version of [frostfs-adm](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases) utility (v0.42.9 at the moment),
+- latest released version of compiled [frostfs-contract](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases) (v0.19.2 at the moment).
## Step 1: Prepare network configuration
diff --git a/config/testnet/README.md b/config/testnet/README.md
index b5faf2b27..e2cda33ec 100644
--- a/config/testnet/README.md
+++ b/config/testnet/README.md
@@ -67,7 +67,7 @@ NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
```
You can validate UN/LOCODE attribute in
-[NeoFS LOCODE database](https://github.com/TrueCloudLab/frostfs-locode-db/releases/tag/v0.1.0)
+[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
with frostfs-cli.
```
diff --git a/docs/release-instruction.md b/docs/release-instruction.md
index ec7b8cdf3..3aebc8e66 100644
--- a/docs/release-instruction.md
+++ b/docs/release-instruction.md
@@ -9,7 +9,7 @@ These should run successfully:
* `make lint` (should not change any files);
* `make fmts` (should not change any files);
* `go mod tidy` (should not change any files);
-* integration tests in [frostfs-devenv](https://github.com/TrueCloudLab/frostfs-devenv).
+* integration tests in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env).
## Make release commit
@@ -123,12 +123,12 @@ the release. Publish the release.
### Update FrostFS Developer Environment
-Prepare pull-request in [frostfs-devenv](https://github.com/TrueCloudLab/frostfs-devenv)
+Prepare pull-request in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
with new versions.
### Close GitHub milestone
-Look up GitHub [milestones](https://github.com/TrueCloudLab/frostfs-node/milestones) and close the release one if exists.
+Look up [milestones](https://git.frostfs.info/TrueCloudLab/frostfs-node/milestones) and close the release one if exists.
### Rebuild FrostFS LOCODE database
From 99be4c83a7c8ab6717ef5242c80f9ccc51d470d7 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 12 Sep 2024 10:00:28 +0300
Subject: [PATCH 096/705] [#1368] *: Run gofumpt
Signed-off-by: Aleksey Savchuk
---
pkg/services/common/ape/checker.go | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go
index f24d22124..278f6da31 100644
--- a/pkg/services/common/ape/checker.go
+++ b/pkg/services/common/ape/checker.go
@@ -63,7 +63,8 @@ type checkerCoreImpl struct {
}
func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader,
- frostFSSubjectProvider frostfsidcore.SubjectProvider, state netmap.State) CheckCore {
+ frostFSSubjectProvider frostfsidcore.SubjectProvider, state netmap.State,
+) CheckCore {
return &checkerCoreImpl{
LocalOverrideStorage: localOverrideStorage,
MorphChainStorage: morphChainStorage,
From 66e17f4b8e968e1d58924ee9a38b720d0df7989c Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 12 Sep 2024 10:01:27 +0300
Subject: [PATCH 097/705] [#1368] cli/container: Use dedicated method to list
user attributes
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-cli/modules/container/list.go | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go
index 6d0019ec4..f01e4db4d 100644
--- a/cmd/frostfs-cli/modules/container/list.go
+++ b/cmd/frostfs-cli/modules/container/list.go
@@ -1,9 +1,6 @@
package container
import (
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
@@ -84,12 +81,8 @@ var listContainersCmd = &cobra.Command{
cmd.Println(cnrID.String())
if flagVarListPrintAttr {
- cnr.IterateAttributes(func(key, val string) {
- if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) {
- // FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
- // Use dedicated method to skip system attributes.
- cmd.Printf(" %s: %s\n", key, val)
- }
+ cnr.IterateUserAttributes(func(key, val string) {
+ cmd.Printf(" %s: %s\n", key, val)
})
}
}
From 5f6c7cbdb102c51e4e994198f805a33084f3b9de Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 9 Sep 2024 18:37:06 +0300
Subject: [PATCH 098/705] [#1367] writecache: Drop bbolt DB
Signed-off-by: Dmitrii Stepanov
---
internal/logs/logs.go | 2 +-
.../writecache/{cachebbolt.go => cache.go} | 15 +-
pkg/local_object_storage/writecache/delete.go | 39 +---
pkg/local_object_storage/writecache/flush.go | 192 ++++--------------
.../writecache/flush_test.go | 28 +--
pkg/local_object_storage/writecache/get.go | 14 +-
pkg/local_object_storage/writecache/mode.go | 39 ++--
.../writecache/mode_test.go | 8 +-
.../writecache/options.go | 11 -
pkg/local_object_storage/writecache/put.go | 53 +----
pkg/local_object_storage/writecache/state.go | 35 +---
.../writecache/storage.go | 61 ------
12 files changed, 82 insertions(+), 415 deletions(-)
rename pkg/local_object_storage/writecache/{cachebbolt.go => cache.go} (94%)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 97b189529..87e4e0b43 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -542,6 +542,6 @@ const (
StartedWritecacheSealAsync = "started writecache seal async"
WritecacheSealCompletedAsync = "writecache seal completed successfully"
FailedToSealWritecacheAsync = "failed to seal writecache async"
- WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: database is not empty"
+ WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
)
diff --git a/pkg/local_object_storage/writecache/cachebbolt.go b/pkg/local_object_storage/writecache/cache.go
similarity index 94%
rename from pkg/local_object_storage/writecache/cachebbolt.go
rename to pkg/local_object_storage/writecache/cache.go
index f1e6a619a..ff38de407 100644
--- a/pkg/local_object_storage/writecache/cachebbolt.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -2,7 +2,7 @@ package writecache
import (
"context"
- "os"
+ "fmt"
"sync"
"sync/atomic"
@@ -27,8 +27,6 @@ type cache struct {
cancel atomic.Value
// wg is a wait group for flush workers.
wg sync.WaitGroup
- // store contains underlying database.
- store
// fsTree contains big files stored directly on file-system.
fsTree *fstree.FSTree
}
@@ -67,7 +65,6 @@ func New(opts ...Option) Cache {
maxCacheSize: defaultMaxCacheSize,
maxBatchSize: bbolt.DefaultMaxBatchSize,
maxBatchDelay: bbolt.DefaultMaxBatchDelay,
- openFile: os.OpenFile,
metrics: DefaultMetrics(),
},
}
@@ -102,13 +99,15 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error {
if err != nil {
return metaerr.Wrap(err)
}
-
return metaerr.Wrap(c.initCounters())
}
// Init runs necessary services.
func (c *cache) Init() error {
c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode))
+ if err := c.flushAndDropBBoltDB(context.Background()); err != nil {
+ return fmt.Errorf("flush previous version write-cache database: %w", err)
+ }
ctx, cancel := context.WithCancel(context.Background())
c.cancel.Store(cancel)
c.runFlushLoop(ctx)
@@ -132,10 +131,10 @@ func (c *cache) Close() error {
defer c.modeMtx.Unlock()
var err error
- if c.db != nil {
- err = c.db.Close()
+ if c.fsTree != nil {
+ err = c.fsTree.Close()
if err != nil {
- c.db = nil
+ c.fsTree = nil
}
}
c.metrics.Close()
diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go
index b1a0511ee..dda284439 100644
--- a/pkg/local_object_storage/writecache/delete.go
+++ b/pkg/local_object_storage/writecache/delete.go
@@ -2,7 +2,6 @@ package writecache
import (
"context"
- "math"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -10,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -45,46 +43,11 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
return ErrDegraded
}
- saddr := addr.EncodeToString()
-
- var dataSize int
- _ = c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- dataSize = len(b.Get([]byte(saddr)))
- return nil
- })
-
- if dataSize > 0 {
- storageType = StorageTypeDB
- var recordDeleted bool
- err := c.db.Update(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- key := []byte(saddr)
- recordDeleted = b.Get(key) != nil
- err := b.Delete(key)
- return err
- })
- if err != nil {
- return err
- }
- storagelog.Write(c.log,
- storagelog.AddressField(saddr),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("db DELETE"),
- )
- if recordDeleted {
- c.objCounters.cDB.Add(math.MaxUint64)
- c.estimateCacheSize()
- }
- deleted = true
- return nil
- }
-
storageType = StorageTypeFSTree
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err == nil {
storagelog.Write(c.log,
- storagelog.AddressField(saddr),
+ storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
)
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 930ac8431..074756e32 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -4,6 +4,9 @@ import (
"bytes"
"context"
"errors"
+ "fmt"
+ "os"
+ "path/filepath"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -16,7 +19,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/mr-tron/base58"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -24,10 +26,6 @@ import (
)
const (
- // flushBatchSize is amount of keys which will be read from cache to be flushed
- // to the main storage. It is used to reduce contention between cache put
- // and cache persist.
- flushBatchSize = 512
// defaultFlushWorkersCount is number of workers for putting objects in main storage.
defaultFlushWorkersCount = 20
// defaultFlushInterval is default time interval between successive flushes.
@@ -41,112 +39,11 @@ func (c *cache) runFlushLoop(ctx context.Context) {
if c.disableBackgroundFlush {
return
}
- for range c.workersCount {
- c.wg.Add(1)
- go c.workerFlushSmall(ctx)
- }
-
c.wg.Add(1)
go func() {
c.workerFlushBig(ctx)
c.wg.Done()
}()
-
- c.wg.Add(1)
- go func() {
- defer c.wg.Done()
-
- tt := time.NewTimer(defaultFlushInterval)
- defer tt.Stop()
-
- for {
- select {
- case <-tt.C:
- c.flushSmallObjects(ctx)
- tt.Reset(defaultFlushInterval)
- c.estimateCacheSize()
- case <-ctx.Done():
- return
- }
- }
- }()
-}
-
-func (c *cache) flushSmallObjects(ctx context.Context) {
- var lastKey []byte
- for {
- select {
- case <-ctx.Done():
- return
- default:
- }
-
- var m []objectInfo
-
- c.modeMtx.RLock()
- if c.readOnly() {
- c.modeMtx.RUnlock()
- time.Sleep(time.Second)
- continue
- }
-
- // We put objects in batches of fixed size to not interfere with main put cycle a lot.
- _ = c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- cs := b.Cursor()
-
- var k, v []byte
-
- if len(lastKey) == 0 {
- k, v = cs.First()
- } else {
- k, v = cs.Seek(lastKey)
- if bytes.Equal(k, lastKey) {
- k, v = cs.Next()
- }
- }
-
- for ; k != nil && len(m) < flushBatchSize; k, v = cs.Next() {
- if len(lastKey) == len(k) {
- copy(lastKey, k)
- } else {
- lastKey = bytes.Clone(k)
- }
-
- m = append(m, objectInfo{
- addr: string(k),
- data: bytes.Clone(v),
- })
- }
- return nil
- })
-
- var count int
- for i := range m {
- obj := objectSDK.New()
- if err := obj.Unmarshal(m[i].data); err != nil {
- continue
- }
- m[i].obj = obj
-
- count++
- select {
- case c.flushCh <- m[i]:
- case <-ctx.Done():
- c.modeMtx.RUnlock()
- return
- }
- }
-
- c.modeMtx.RUnlock()
- if count == 0 {
- break
- }
-
- c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache,
- zap.Int("count", count),
- zap.String("start", base58.Encode(lastKey)))
- }
}
func (c *cache) workerFlushBig(ctx context.Context) {
@@ -197,9 +94,6 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
err = c.flushObject(ctx, &obj, e.ObjectData, StorageTypeFSTree)
if err != nil {
- if ignoreErrors {
- return nil
- }
return err
}
@@ -211,29 +105,6 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
return err
}
-// workerFlushSmall writes small objects to the main storage.
-func (c *cache) workerFlushSmall(ctx context.Context) {
- defer c.wg.Done()
-
- var objInfo objectInfo
- for {
- // Give priority to direct put.
- select {
- case objInfo = <-c.flushCh:
- case <-ctx.Done():
- return
- }
-
- err := c.flushObject(ctx, objInfo.obj, objInfo.data, StorageTypeDB)
- if err != nil {
- // Error is handled in flushObject.
- continue
- }
-
- c.deleteFromDB(objInfo.addr, true)
- }
-}
-
// flushObject is used to write object directly to the main storage.
func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []byte, st StorageType) error {
var err error
@@ -300,13 +171,33 @@ func (c *cache) Flush(ctx context.Context, ignoreErrors, seal bool) error {
}
func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
- if err := c.flushFSTree(ctx, ignoreErrors); err != nil {
- return err
+ return c.flushFSTree(ctx, ignoreErrors)
+}
+
+type batchItem struct {
+ data []byte
+ address string
+}
+
+func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
+ _, err := os.Stat(filepath.Join(c.path, dbName))
+ if err != nil && os.IsNotExist(err) {
+ return nil
}
+ if err != nil {
+ return fmt.Errorf("could not check write-cache database existence: %w", err)
+ }
+ db, err := OpenDB(c.path, true, os.OpenFile, c.pageSize)
+ if err != nil {
+ return fmt.Errorf("could not open write-cache database: %w", err)
+ }
+ defer func() {
+ _ = db.Close()
+ }()
var last string
for {
- batch, err := c.readNextDBBatch(ignoreErrors, last)
+ batch, err := c.readNextDBBatch(db, last)
if err != nil {
return err
}
@@ -316,32 +207,27 @@ func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
for _, item := range batch {
var obj objectSDK.Object
if err := obj.Unmarshal(item.data); err != nil {
- c.reportFlushError(logs.FSTreeCantDecodeDBObjectAddress, item.address, metaerr.Wrap(err))
- if ignoreErrors {
- continue
- }
- return err
+ return fmt.Errorf("unmarshal object from database: %w", err)
}
-
if err := c.flushObject(ctx, &obj, item.data, StorageTypeDB); err != nil {
- return err
+ return fmt.Errorf("flush object from database: %w", err)
}
- c.deleteFromDB(item.address, false)
}
last = batch[len(batch)-1].address
}
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("close write-cache database: %w", err)
+ }
+ if err := os.Remove(filepath.Join(c.path, dbName)); err != nil {
+ return fmt.Errorf("remove write-cache database: %w", err)
+ }
return nil
}
-type batchItem struct {
- data []byte
- address string
-}
-
-func (c *cache) readNextDBBatch(ignoreErrors bool, last string) ([]batchItem, error) {
+func (c *cache) readNextDBBatch(db *bbolt.DB, last string) ([]batchItem, error) {
const batchSize = 100
var batch []batchItem
- err := c.db.View(func(tx *bbolt.Tx) error {
+ err := db.View(func(tx *bbolt.Tx) error {
var addr oid.Address
b := tx.Bucket(defaultBucket)
@@ -352,11 +238,7 @@ func (c *cache) readNextDBBatch(ignoreErrors bool, last string) ([]batchItem, er
continue
}
if err := addr.DecodeString(sa); err != nil {
- c.reportFlushError(logs.FSTreeCantDecodeDBObjectAddress, sa, metaerr.Wrap(err))
- if ignoreErrors {
- continue
- }
- return err
+ return fmt.Errorf("decode address from database: %w", err)
}
batch = append(batch, batchItem{data: bytes.Clone(data), address: sa})
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index a637da45d..9c7e240e0 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -19,7 +19,6 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
- "go.etcd.io/bbolt"
"go.uber.org/zap"
)
@@ -47,31 +46,6 @@ func TestFlush(t *testing.T) {
}
failures := []TestFailureInjector[Option]{
- {
- Desc: "db, invalid address",
- InjectFn: func(t *testing.T, wc Cache) {
- c := wc.(*cache)
- obj := testutil.GenerateObject()
- data, err := obj.Marshal()
- require.NoError(t, err)
- require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- return b.Put([]byte{1, 2, 3}, data)
- }))
- },
- },
- {
- Desc: "db, invalid object",
- InjectFn: func(t *testing.T, wc Cache) {
- c := wc.(*cache)
- require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- k := []byte(oidtest.Address().EncodeToString())
- v := []byte{1, 2, 3}
- return b.Put(k, v)
- }))
- },
- },
{
Desc: "fs, read error",
InjectFn: func(t *testing.T, wc Cache) {
@@ -263,7 +237,7 @@ func check(t *testing.T, mb *meta.DB, bs *blobstor.BlobStor, objects []objectPai
prm.StorageID = mRes.StorageID()
res, err := bs.Get(context.Background(), prm)
- require.NoError(t, err)
+ require.NoError(t, err, objects[i].addr)
require.Equal(t, objects[i].obj, res.Object)
}
}
diff --git a/pkg/local_object_storage/writecache/get.go b/pkg/local_object_storage/writecache/get.go
index bf26833bd..c0847a65f 100644
--- a/pkg/local_object_storage/writecache/get.go
+++ b/pkg/local_object_storage/writecache/get.go
@@ -37,11 +37,11 @@ func (c *cache) Get(ctx context.Context, addr oid.Address) (*objectSDK.Object, e
return nil, ErrDegraded
}
- obj, err := c.getInternal(ctx, saddr, addr)
+ obj, err := c.getInternal(ctx, addr)
return obj, metaerr.Wrap(err)
}
-func (c *cache) getInternal(ctx context.Context, saddr string, addr oid.Address) (*objectSDK.Object, error) {
+func (c *cache) getInternal(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
found := false
storageType := StorageTypeUndefined
startedAt := time.Now()
@@ -49,14 +49,6 @@ func (c *cache) getInternal(ctx context.Context, saddr string, addr oid.Address)
c.metrics.Get(time.Since(startedAt), found, storageType)
}()
- value, err := Get(c.db, []byte(saddr))
- if err == nil {
- obj := objectSDK.New()
- found = true
- storageType = StorageTypeDB
- return obj, obj.Unmarshal(value)
- }
-
res, err := c.fsTree.Get(ctx, common.GetPrm{Address: addr})
if err != nil {
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
@@ -87,7 +79,7 @@ func (c *cache) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object,
return nil, ErrDegraded
}
- obj, err := c.getInternal(ctx, saddr, addr)
+ obj, err := c.getInternal(ctx, addr)
if err != nil {
return nil, metaerr.Wrap(err)
}
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index 44da9b36e..d12dd603b 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -5,13 +5,12 @@ import (
"errors"
"fmt"
"os"
- "path/filepath"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -53,7 +52,7 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error
}
}
- if err := c.closeDB(prm.shrink); err != nil {
+ if err := c.closeStorage(ctx, prm.shrink); err != nil {
return err
}
@@ -78,33 +77,37 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error
return nil
}
-func (c *cache) closeDB(shrink bool) error {
- if c.db == nil {
+func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
+ if c.fsTree == nil {
return nil
}
if !shrink {
- if err := c.db.Close(); err != nil {
- return fmt.Errorf("can't close write-cache database: %w", err)
+ if err := c.fsTree.Close(); err != nil {
+ return fmt.Errorf("can't close write-cache storage: %w", err)
}
return nil
}
- var empty bool
- err := c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- empty = b == nil || b.Stats().KeyN == 0
- return nil
+ empty := true
+ _, err := c.fsTree.Iterate(ctx, common.IteratePrm{
+ Handler: func(common.IterationElement) error {
+ return errIterationCompleted
+ },
})
- if err != nil && !errors.Is(err, bbolt.ErrDatabaseNotOpen) {
- return fmt.Errorf("failed to check DB items: %w", err)
+ if err != nil {
+ if errors.Is(err, errIterationCompleted) {
+ empty = false
+ } else {
+ return fmt.Errorf("failed to check write-cache items: %w", err)
+ }
}
- if err := c.db.Close(); err != nil {
- return fmt.Errorf("can't close write-cache database: %w", err)
+ if err := c.fsTree.Close(); err != nil {
+ return fmt.Errorf("can't close write-cache storage: %w", err)
}
if empty {
- err := os.Remove(filepath.Join(c.path, dbName))
+ err := os.RemoveAll(c.path)
if err != nil && !os.IsNotExist(err) {
- return fmt.Errorf("failed to remove DB file: %w", err)
+ return fmt.Errorf("failed to remove write-cache files: %w", err)
}
} else {
c.log.Info(logs.WritecacheShrinkSkippedNotEmpty)
diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go
index f684c15bc..70cfe8382 100644
--- a/pkg/local_object_storage/writecache/mode_test.go
+++ b/pkg/local_object_storage/writecache/mode_test.go
@@ -17,14 +17,14 @@ func TestMode(t *testing.T) {
WithPath(t.TempDir()))
require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly))
- require.Nil(t, wc.(*cache).db)
+ require.Nil(t, wc.(*cache).fsTree)
require.NoError(t, wc.Init())
- require.Nil(t, wc.(*cache).db)
+ require.Nil(t, wc.(*cache).fsTree)
require.NoError(t, wc.Close())
require.NoError(t, wc.Open(context.Background(), mode.Degraded))
- require.Nil(t, wc.(*cache).db)
+ require.Nil(t, wc.(*cache).fsTree)
require.NoError(t, wc.Init())
- require.Nil(t, wc.(*cache).db)
+ require.Nil(t, wc.(*cache).fsTree)
require.NoError(t, wc.Close())
}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 980cf9303..7845c5da9 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -1,8 +1,6 @@
package writecache
import (
- "io/fs"
- "os"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -42,8 +40,6 @@ type options struct {
noSync bool
// reportError is the function called when encountering disk errors in background workers.
reportError func(string, error)
- // openFile is the function called internally by bbolt to open database files. Useful for hermetic testing.
- openFile func(string, int, fs.FileMode) (*os.File, error)
// metrics is metrics implementation
metrics Metrics
// disableBackgroundFlush is for testing purposes only.
@@ -155,13 +151,6 @@ func WithReportErrorFunc(f func(string, error)) Option {
}
}
-// WithOpenFile sets the OpenFile function to use internally by bolt. Useful for hermetic testing.
-func WithOpenFile(f func(string, int, fs.FileMode) (*os.File, error)) Option {
- return func(o *options) {
- o.openFile = f
- }
-}
-
// WithMetrics sets metrics implementation.
func WithMetrics(metrics Metrics) Option {
return func(o *options) {
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index ae0e8b77a..c53067bea 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -8,7 +8,6 @@ import (
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -50,62 +49,16 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro
return common.PutRes{}, ErrBigObject
}
- oi := objectInfo{
- addr: prm.Address.EncodeToString(),
- obj: prm.Object,
- data: prm.RawData,
- }
-
- if sz <= c.smallObjectSize {
- storageType = StorageTypeDB
- err := c.putSmall(oi)
- if err == nil {
- added = true
- }
- return common.PutRes{}, err
- }
-
storageType = StorageTypeFSTree
- err := c.putBig(ctx, oi.addr, prm)
+ err := c.putBig(ctx, prm)
if err == nil {
added = true
}
return common.PutRes{}, metaerr.Wrap(err)
}
-// putSmall persists small objects to the write-cache database and
-// pushes the to the flush workers queue.
-func (c *cache) putSmall(obj objectInfo) error {
- if !c.hasEnoughSpaceDB() {
- return ErrOutOfSpace
- }
-
- var newRecord bool
- err := c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- key := []byte(obj.addr)
- newRecord = b.Get(key) == nil
- if newRecord {
- return b.Put(key, obj.data)
- }
- return nil
- })
- if err == nil {
- storagelog.Write(c.log,
- storagelog.AddressField(obj.addr),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("db PUT"),
- )
- if newRecord {
- c.objCounters.cDB.Add(1)
- c.estimateCacheSize()
- }
- }
- return err
-}
-
// putBig writes object to FSTree and pushes it to the flush workers queue.
-func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) error {
+func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
if !c.hasEnoughSpaceFS() {
return ErrOutOfSpace
}
@@ -116,7 +69,7 @@ func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) erro
}
storagelog.Write(c.log,
- storagelog.AddressField(addr),
+ storagelog.AddressField(prm.Address.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree PUT"),
)
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index d03f4a63e..e4e22f404 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -1,29 +1,18 @@
package writecache
import (
- "fmt"
"math"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "go.etcd.io/bbolt"
)
func (c *cache) estimateCacheSize() (uint64, uint64) {
- dbCount := c.objCounters.DB()
fsCount := c.objCounters.FS()
- if fsCount > 0 {
- fsCount-- // db file
- }
- dbSize := dbCount * c.smallObjectSize
fsSize := fsCount * c.maxObjectSize
- c.metrics.SetEstimateSize(dbSize, fsSize)
- c.metrics.SetActualCounters(dbCount, fsCount)
- return dbCount + fsCount, dbSize + fsSize
-}
-
-func (c *cache) hasEnoughSpaceDB() bool {
- return c.hasEnoughSpace(c.smallObjectSize)
+ c.metrics.SetEstimateSize(0, fsSize)
+ c.metrics.SetActualCounters(0, fsCount)
+ return fsCount, fsSize
}
func (c *cache) hasEnoughSpaceFS() bool {
@@ -41,11 +30,7 @@ func (c *cache) hasEnoughSpace(objectSize uint64) bool {
var _ fstree.FileCounter = &counters{}
type counters struct {
- cDB, cFS atomic.Uint64
-}
-
-func (x *counters) DB() uint64 {
- return x.cDB.Load()
+ cFS atomic.Uint64
}
func (x *counters) FS() uint64 {
@@ -68,18 +53,6 @@ func (x *counters) Dec() {
}
func (c *cache) initCounters() error {
- var inDB uint64
- err := c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- if b != nil {
- inDB = uint64(b.Stats().KeyN)
- }
- return nil
- })
- if err != nil {
- return fmt.Errorf("could not read write-cache DB counter: %w", err)
- }
- c.objCounters.cDB.Store(inDB)
c.estimateCacheSize()
return nil
}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index 57021cc17..309bd2a66 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -3,7 +3,6 @@ package writecache
import (
"context"
"fmt"
- "math"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -14,16 +13,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
"go.uber.org/zap"
)
-// store represents persistent storage with in-memory LRU cache
-// for flushed items on top of it.
-type store struct {
- db *bbolt.DB
-}
-
const dbName = "small.bolt"
func (c *cache) openStore(mod mode.ComponentMode) error {
@@ -32,24 +24,6 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
return err
}
- c.db, err = OpenDB(c.path, mod.ReadOnly(), c.openFile, c.pageSize)
- if err != nil {
- return fmt.Errorf("could not open database: %w", err)
- }
-
- c.db.MaxBatchSize = c.maxBatchSize
- c.db.MaxBatchDelay = c.maxBatchDelay
-
- if !mod.ReadOnly() {
- err = c.db.Update(func(tx *bbolt.Tx) error {
- _, err := tx.CreateBucketIfNotExists(defaultBucket)
- return err
- })
- if err != nil {
- return fmt.Errorf("could not create default bucket: %w", err)
- }
- }
-
c.fsTree = fstree.New(
fstree.WithPath(c.path),
fstree.WithPerm(os.ModePerm),
@@ -68,41 +42,6 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
return nil
}
-func (c *cache) deleteFromDB(key string, batched bool) {
- var recordDeleted bool
- var err error
- if batched {
- err = c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- key := []byte(key)
- recordDeleted = b.Get(key) != nil
- return b.Delete(key)
- })
- } else {
- err = c.db.Update(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- key := []byte(key)
- recordDeleted = b.Get(key) != nil
- return b.Delete(key)
- })
- }
-
- if err == nil {
- c.metrics.Evict(StorageTypeDB)
- storagelog.Write(c.log,
- storagelog.AddressField(key),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("db DELETE"),
- )
- if recordDeleted {
- c.objCounters.cDB.Add(math.MaxUint64)
- c.estimateCacheSize()
- }
- } else {
- c.log.Error(logs.WritecacheCantRemoveObjectsFromTheDatabase, zap.Error(err))
- }
-}
-
func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address) {
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err != nil && !client.IsErrObjectNotFound(err) {
From b142b6f48e46210235a56840c79da358120cf0cc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 10 Sep 2024 11:01:30 +0300
Subject: [PATCH 099/705] [#1367] fstree: Add size to file counter
The FSTree file counter is used by the writecache. Since the writecache now
has only one storage, the real object size must be used to make the
writecache size estimate more accurate than `count * max_object_size`.
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/fstree/counter.go | 61 +++++++++++++++----
.../blobstor/fstree/fstree.go | 22 ++++---
.../blobstor/fstree/fstree_test.go | 15 +++--
.../blobstor/fstree/fstree_write_generic.go | 27 +++++---
.../blobstor/fstree/fstree_write_linux.go | 42 ++++++++++---
pkg/local_object_storage/writecache/cache.go | 3 +
.../writecache/options.go | 2 -
pkg/local_object_storage/writecache/state.go | 41 ++-----------
.../writecache/storage.go | 2 +-
9 files changed, 130 insertions(+), 85 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go
index 718104e2e..b5dbc9e40 100644
--- a/pkg/local_object_storage/blobstor/fstree/counter.go
+++ b/pkg/local_object_storage/blobstor/fstree/counter.go
@@ -1,22 +1,21 @@
package fstree
import (
- "math"
- "sync/atomic"
+ "sync"
)
// FileCounter used to count files in FSTree. The implementation must be thread-safe.
type FileCounter interface {
- Set(v uint64)
- Inc()
- Dec()
+ Set(count, size uint64)
+ Inc(size uint64)
+ Dec(size uint64)
}
type noopCounter struct{}
-func (c *noopCounter) Set(uint64) {}
-func (c *noopCounter) Inc() {}
-func (c *noopCounter) Dec() {}
+func (c *noopCounter) Set(uint64, uint64) {}
+func (c *noopCounter) Inc(uint64) {}
+func (c *noopCounter) Dec(uint64) {}
func counterEnabled(c FileCounter) bool {
_, noop := c.(*noopCounter)
@@ -24,14 +23,50 @@ func counterEnabled(c FileCounter) bool {
}
type SimpleCounter struct {
- v atomic.Uint64
+ mtx sync.RWMutex
+ count uint64
+ size uint64
}
func NewSimpleCounter() *SimpleCounter {
return &SimpleCounter{}
}
-func (c *SimpleCounter) Set(v uint64) { c.v.Store(v) }
-func (c *SimpleCounter) Inc() { c.v.Add(1) }
-func (c *SimpleCounter) Dec() { c.v.Add(math.MaxUint64) }
-func (c *SimpleCounter) Value() uint64 { return c.v.Load() }
+func (c *SimpleCounter) Set(count, size uint64) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ c.count = count
+ c.size = size
+}
+
+func (c *SimpleCounter) Inc(size uint64) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ c.count++
+ c.size += size
+}
+
+func (c *SimpleCounter) Dec(size uint64) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ if c.count > 0 {
+ c.count--
+ } else {
+ panic("fstree.SimpleCounter: invalid count")
+ }
+ if c.size >= size {
+ c.size -= size
+ } else {
+ panic("fstree.SimpleCounter: invalid size")
+ }
+}
+
+func (c *SimpleCounter) CountSize() (uint64, uint64) {
+ c.mtx.RLock()
+ defer c.mtx.RUnlock()
+
+ return c.count, c.size
+}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 02580dbfa..bf6ba51e5 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -435,32 +435,38 @@ func (t *FSTree) initFileCounter() error {
return nil
}
- counter, err := t.countFiles()
+ count, size, err := t.countFiles()
if err != nil {
return err
}
- t.fileCounter.Set(counter)
+ t.fileCounter.Set(count, size)
return nil
}
-func (t *FSTree) countFiles() (uint64, error) {
- var counter uint64
+func (t *FSTree) countFiles() (uint64, uint64, error) {
+ var count, size uint64
// it is simpler to just consider every file
// that is not directory as an object
err := filepath.WalkDir(t.RootPath,
func(_ string, d fs.DirEntry, _ error) error {
- if !d.IsDir() {
- counter++
+ if d.IsDir() {
+ return nil
}
+ count++
+ info, err := d.Info()
+ if err != nil {
+ return err
+ }
+ size += uint64(info.Size())
return nil
},
)
if err != nil {
- return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
+ return 0, 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
}
- return counter, nil
+ return count, size, nil
}
func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
index 5786dfd3b..f39c7296e 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
@@ -47,8 +47,9 @@ func TestObjectCounter(t *testing.T) {
require.NoError(t, fst.Open(mode.ComponentReadWrite))
require.NoError(t, fst.Init())
- counterValue := counter.Value()
- require.Equal(t, uint64(0), counterValue)
+ count, size := counter.CountSize()
+ require.Equal(t, uint64(0), count)
+ require.Equal(t, uint64(0), size)
defer func() {
require.NoError(t, fst.Close())
@@ -64,9 +65,6 @@ func TestObjectCounter(t *testing.T) {
putPrm.Address = addr
putPrm.RawData, _ = obj.Marshal()
- var getPrm common.GetPrm
- getPrm.Address = putPrm.Address
-
var delPrm common.DeletePrm
delPrm.Address = addr
@@ -95,8 +93,9 @@ func TestObjectCounter(t *testing.T) {
require.NoError(t, eg.Wait())
- counterValue = counter.Value()
- realCount, err := fst.countFiles()
+ count, size = counter.CountSize()
+ realCount, realSize, err := fst.countFiles()
require.NoError(t, err)
- require.Equal(t, realCount, counterValue)
+ require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
+ require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
index 8b2622885..801fc4a22 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
@@ -78,14 +78,14 @@ func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error {
}
if w.fileCounterEnabled {
- w.fileCounter.Inc()
+ w.fileCounter.Inc(uint64(len(data)))
var targetFileExists bool
if _, e := os.Stat(p); e == nil {
targetFileExists = true
}
err = os.Rename(tmpPath, p)
if err == nil && targetFileExists {
- w.fileCounter.Dec()
+ w.fileCounter.Dec(uint64(len(data)))
}
} else {
err = os.Rename(tmpPath, p)
@@ -110,12 +110,7 @@ func (w *genericWriter) writeFile(p string, data []byte) error {
func (w *genericWriter) removeFile(p string) error {
var err error
if w.fileCounterEnabled {
- w.fileGuard.Lock(p)
- err = os.Remove(p)
- w.fileGuard.Unlock(p)
- if err == nil {
- w.fileCounter.Dec()
- }
+ err = w.removeWithCounter(p)
} else {
err = os.Remove(p)
}
@@ -125,3 +120,19 @@ func (w *genericWriter) removeFile(p string) error {
}
return err
}
+
+func (w *genericWriter) removeWithCounter(p string) error {
+ w.fileGuard.Lock(p)
+ defer w.fileGuard.Unlock(p)
+
+ stat, err := os.Stat(p)
+ if err != nil {
+ return err
+ }
+
+ if err := os.Remove(p); err != nil {
+ return err
+ }
+ w.fileCounter.Dec(uint64(stat.Size()))
+ return nil
+}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
index efc5a3d3d..3127579ac 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"golang.org/x/sys/unix"
)
@@ -18,7 +19,9 @@ type linuxWriter struct {
perm uint32
flags int
- counter FileCounter
+ fileGuard keyLock
+ fileCounter FileCounter
+ fileCounterEnabled bool
}
func newSpecificWriteData(c FileCounter, root string, perm fs.FileMode, noSync bool) writer {
@@ -33,11 +36,18 @@ func newSpecificWriteData(c FileCounter, root string, perm fs.FileMode, noSync b
return nil
}
_ = unix.Close(fd) // Don't care about error.
+ var fileGuard keyLock = &noopKeyLock{}
+ fileCounterEnabled := counterEnabled(c)
+ if fileCounterEnabled {
+ fileGuard = utilSync.NewKeyLocker[string]()
+ }
w := &linuxWriter{
- root: root,
- perm: uint32(perm),
- flags: flags,
- counter: c,
+ root: root,
+ perm: uint32(perm),
+ flags: flags,
+ fileGuard: fileGuard,
+ fileCounter: c,
+ fileCounterEnabled: fileCounterEnabled,
}
return w
}
@@ -51,6 +61,10 @@ func (w *linuxWriter) writeData(p string, data []byte) error {
}
func (w *linuxWriter) writeFile(p string, data []byte) error {
+ if w.fileCounterEnabled {
+ w.fileGuard.Lock(p)
+ defer w.fileGuard.Unlock(p)
+ }
fd, err := unix.Open(w.root, w.flags, w.perm)
if err != nil {
return err
@@ -61,7 +75,7 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
if n == len(data) {
err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW)
if err == nil {
- w.counter.Inc()
+ w.fileCounter.Inc(uint64(len(data)))
}
if errors.Is(err, unix.EEXIST) {
err = nil
@@ -78,12 +92,24 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
}
func (w *linuxWriter) removeFile(p string) error {
- err := unix.Unlink(p)
+ if w.fileCounterEnabled {
+ w.fileGuard.Lock(p)
+ defer w.fileGuard.Unlock(p)
+ }
+ var stat unix.Stat_t
+ err := unix.Stat(p, &stat)
+ if err != nil {
+ if err == unix.ENOENT {
+ return logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+ return err
+ }
+ err = unix.Unlink(p)
if err != nil && err == unix.ENOENT {
return logicerr.Wrap(new(apistatus.ObjectNotFound))
}
if err == nil {
- w.counter.Dec()
+ w.fileCounter.Dec(uint64(stat.Size))
}
return err
}
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index ff38de407..f2280f2f4 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -29,6 +29,8 @@ type cache struct {
wg sync.WaitGroup
// fsTree contains big files stored directly on file-system.
fsTree *fstree.FSTree
+ // counter tracks the count and total size of objects stored in the cache.
+ counter *fstree.SimpleCounter
}
// wcStorageType is used for write-cache operations logging.
@@ -56,6 +58,7 @@ func New(opts ...Option) Cache {
c := &cache{
flushCh: make(chan objectInfo),
mode: mode.Disabled,
+ counter: fstree.NewSimpleCounter(),
options: options{
log: &logger.Logger{Logger: zap.NewNop()},
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 7845c5da9..0643faac0 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -30,8 +30,6 @@ type options struct {
// maxCacheCount is the maximum total count of all object saved in cache.
// 0 (no limit) by default.
maxCacheCount uint64
- // objCounters contains atomic counters for the number of objects stored in cache.
- objCounters counters
// maxBatchSize is the maximum batch size for the small object database.
maxBatchSize int
// maxBatchDelay is the maximum batch wait time for the small object database.
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index e4e22f404..748c78bcb 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -1,18 +1,10 @@
package writecache
-import (
- "math"
- "sync/atomic"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
-)
-
func (c *cache) estimateCacheSize() (uint64, uint64) {
- fsCount := c.objCounters.FS()
- fsSize := fsCount * c.maxObjectSize
- c.metrics.SetEstimateSize(0, fsSize)
- c.metrics.SetActualCounters(0, fsCount)
- return fsCount, fsSize
+ count, size := c.counter.CountSize()
+ c.metrics.SetEstimateSize(0, size)
+ c.metrics.SetActualCounters(0, count)
+ return count, size
}
func (c *cache) hasEnoughSpaceFS() bool {
@@ -27,31 +19,6 @@ func (c *cache) hasEnoughSpace(objectSize uint64) bool {
return c.maxCacheSize >= size+objectSize
}
-var _ fstree.FileCounter = &counters{}
-
-type counters struct {
- cFS atomic.Uint64
-}
-
-func (x *counters) FS() uint64 {
- return x.cFS.Load()
-}
-
-// Set implements fstree.ObjectCounter.
-func (x *counters) Set(v uint64) {
- x.cFS.Store(v)
-}
-
-// Inc implements fstree.ObjectCounter.
-func (x *counters) Inc() {
- x.cFS.Add(1)
-}
-
-// Dec implements fstree.ObjectCounter.
-func (x *counters) Dec() {
- x.cFS.Add(math.MaxUint64)
-}
-
func (c *cache) initCounters() error {
c.estimateCacheSize()
return nil
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index 309bd2a66..e708a529e 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -30,7 +30,7 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
fstree.WithDepth(1),
fstree.WithDirNameLen(1),
fstree.WithNoSync(c.noSync),
- fstree.WithFileCounter(&c.objCounters),
+ fstree.WithFileCounter(c.counter),
)
if err := c.fsTree.Open(mod); err != nil {
return fmt.Errorf("could not open FSTree: %w", err)
From 2dd3a6f7a85800452f38ddb3127dea455b6366dc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 10 Sep 2024 11:47:42 +0300
Subject: [PATCH 100/705] [#1367] fstree: Add IterateInfo method
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/fstree/fstree.go | 75 +++++++++++++++++++
.../blobstor/fstree/metrics.go | 2 +
pkg/local_object_storage/metrics/fstree.go | 4 +
3 files changed, 81 insertions(+)
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index bf6ba51e5..1c60ec340 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -222,6 +222,81 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
return nil
}
+type ObjectInfo struct {
+ Address oid.Address
+ DataSize uint64
+}
+type IterateInfoHandler func(ObjectInfo) error
+
+func (t *FSTree) IterateInfo(ctx context.Context, handler IterateInfoHandler) error {
+ var (
+ err error
+ startedAt = time.Now()
+ )
+ defer func() {
+ t.metrics.IterateInfo(time.Since(startedAt), err == nil)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.IterateInfo")
+ defer span.End()
+
+ return t.iterateInfo(ctx, 0, []string{t.RootPath}, handler)
+}
+
+func (t *FSTree) iterateInfo(ctx context.Context, depth uint64, curPath []string, handler IterateInfoHandler) error {
+ curName := strings.Join(curPath[1:], "")
+ dirPath := filepath.Join(curPath...)
+ entries, err := os.ReadDir(dirPath)
+ if err != nil {
+ return fmt.Errorf("read fstree dir '%s': %w", dirPath, err)
+ }
+
+ isLast := depth >= t.Depth
+ l := len(curPath)
+ curPath = append(curPath, "")
+
+ for i := range entries {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ curPath[l] = entries[i].Name()
+
+ if !isLast && entries[i].IsDir() {
+ err := t.iterateInfo(ctx, depth+1, curPath, handler)
+ if err != nil {
+ return err
+ }
+ }
+
+ if depth != t.Depth {
+ continue
+ }
+
+ addr, err := addressFromString(curName + entries[i].Name())
+ if err != nil {
+ continue
+ }
+ info, err := entries[i].Info()
+ if err != nil {
+ if os.IsNotExist(err) {
+ continue
+ }
+ return err
+ }
+
+ err = handler(ObjectInfo{
+ Address: addr,
+ DataSize: uint64(info.Size()),
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
func (t *FSTree) treePath(addr oid.Address) string {
sAddr := stringifyAddress(addr)
diff --git a/pkg/local_object_storage/blobstor/fstree/metrics.go b/pkg/local_object_storage/blobstor/fstree/metrics.go
index 10de935eb..4241beec9 100644
--- a/pkg/local_object_storage/blobstor/fstree/metrics.go
+++ b/pkg/local_object_storage/blobstor/fstree/metrics.go
@@ -13,6 +13,7 @@ type Metrics interface {
Close()
Iterate(d time.Duration, success bool)
+ IterateInfo(d time.Duration, success bool)
Delete(d time.Duration, success bool)
Exists(d time.Duration, success bool)
Put(d time.Duration, size int, success bool)
@@ -27,6 +28,7 @@ func (m *noopMetrics) SetParentID(string) {}
func (m *noopMetrics) SetMode(mode.ComponentMode) {}
func (m *noopMetrics) Close() {}
func (m *noopMetrics) Iterate(time.Duration, bool) {}
+func (m *noopMetrics) IterateInfo(time.Duration, bool) {}
func (m *noopMetrics) Delete(time.Duration, bool) {}
func (m *noopMetrics) Exists(time.Duration, bool) {}
func (m *noopMetrics) Put(time.Duration, int, bool) {}
diff --git a/pkg/local_object_storage/metrics/fstree.go b/pkg/local_object_storage/metrics/fstree.go
index 76822ac2c..d93363fa3 100644
--- a/pkg/local_object_storage/metrics/fstree.go
+++ b/pkg/local_object_storage/metrics/fstree.go
@@ -38,6 +38,10 @@ func (m *fstreeMetrics) Iterate(d time.Duration, success bool) {
m.m.MethodDuration(m.shardID, m.path, "Iterate", d, success)
}
+func (m *fstreeMetrics) IterateInfo(d time.Duration, success bool) {
+ m.m.MethodDuration(m.shardID, m.path, "IterateInfo", d, success)
+}
+
func (m *fstreeMetrics) Delete(d time.Duration, success bool) {
m.m.MethodDuration(m.shardID, m.path, "Delete", d, success)
}
From 8a6e3025a07d9c4d80a6252f4ee8bb0e0aa2021d Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 10 Sep 2024 11:49:17 +0300
Subject: [PATCH 101/705] [#1367] writecache: Flush from FSTree concurrently
Signed-off-by: Dmitrii Stepanov
---
internal/logs/logs.go | 1 +
pkg/local_object_storage/writecache/cache.go | 7 +--
pkg/local_object_storage/writecache/flush.go | 65 +++++++++++++++++---
3 files changed, 62 insertions(+), 11 deletions(-)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 87e4e0b43..7aef6873e 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -544,4 +544,5 @@ const (
FailedToSealWritecacheAsync = "failed to seal writecache async"
WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
+ WritecacheCantGetObject = "can't get an object from fstree"
)
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index f2280f2f4..b298f812a 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -10,7 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.uber.org/zap"
)
@@ -37,9 +37,8 @@ type cache struct {
const wcStorageType = "write-cache"
type objectInfo struct {
- addr string
- data []byte
- obj *objectSDK.Object
+ addr oid.Address
+ size uint64
}
const (
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 074756e32..d06896ed5 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -13,10 +13,12 @@ import (
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
@@ -29,7 +31,7 @@ const (
// defaultFlushWorkersCount is number of workers for putting objects in main storage.
defaultFlushWorkersCount = 20
// defaultFlushInterval is default time interval between successive flushes.
- defaultFlushInterval = time.Second
+ defaultFlushInterval = 10 * time.Second
)
var errIterationCompleted = errors.New("iteration completed")
@@ -41,23 +43,41 @@ func (c *cache) runFlushLoop(ctx context.Context) {
}
c.wg.Add(1)
go func() {
- c.workerFlushBig(ctx)
- c.wg.Done()
+ defer c.wg.Done()
+ c.pushToFlushQueue(ctx)
}()
+
+ for range c.workersCount {
+ c.wg.Add(1)
+ go c.workerFlush(ctx)
+ }
}
-func (c *cache) workerFlushBig(ctx context.Context) {
- tick := time.NewTicker(defaultFlushInterval * 10)
+func (c *cache) pushToFlushQueue(ctx context.Context) {
+ tick := time.NewTicker(defaultFlushInterval)
for {
select {
case <-tick.C:
c.modeMtx.RLock()
if c.readOnly() || c.noMetabase() {
c.modeMtx.RUnlock()
- break
+ continue
}
- _ = c.flushFSTree(ctx, true)
+ err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
+ select {
+ case c.flushCh <- objectInfo{
+ addr: oi.Address,
+ size: oi.DataSize,
+ }:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ })
+ if err != nil {
+ c.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
+ }
c.modeMtx.RUnlock()
case <-ctx.Done():
@@ -66,6 +86,37 @@ func (c *cache) workerFlushBig(ctx context.Context) {
}
}
+func (c *cache) workerFlush(ctx context.Context) {
+ defer c.wg.Done()
+
+ var objInfo objectInfo
+ for {
+ select {
+ case objInfo = <-c.flushCh:
+ case <-ctx.Done():
+ return
+ }
+
+ res, err := c.fsTree.Get(ctx, common.GetPrm{
+ Address: objInfo.addr,
+ })
+ if err != nil {
+ if !errors.As(err, new(*apistatus.ObjectNotFound)) {
+ c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
+ }
+ continue
+ }
+
+ err = c.flushObject(ctx, res.Object, res.RawData, StorageTypeFSTree)
+ if err != nil {
+ // Error is handled in flushObject.
+ continue
+ }
+
+ c.deleteFromDisk(ctx, objInfo.addr)
+ }
+}
+
func (c *cache) reportFlushError(msg string, addr string, err error) {
if c.reportError != nil {
c.reportError(msg, err)
From e39378b1c36d0d00864c4f5e7fcab44975ce506d Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 10 Sep 2024 12:56:29 +0300
Subject: [PATCH 102/705] [#1367] writecache: Add background flushing objects
limiter
To limit memory usage by background flush.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 3 +
cmd/frostfs-node/config/engine/config_test.go | 2 +
.../config/engine/shard/writecache/config.go | 18 +++++
config/example/node.env | 1 +
config/example/node.json | 3 +-
config/example/node.yaml | 1 +
docs/storage-node-configuration.md | 23 +++---
pkg/local_object_storage/writecache/cache.go | 1 +
pkg/local_object_storage/writecache/flush.go | 61 ++++++++++------
.../writecache/limiter.go | 70 +++++++++++++++++++
.../writecache/limiter_test.go | 27 +++++++
.../writecache/options.go | 9 +++
12 files changed, 184 insertions(+), 35 deletions(-)
create mode 100644 pkg/local_object_storage/writecache/limiter.go
create mode 100644 pkg/local_object_storage/writecache/limiter_test.go
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 16f49a082..ef2752872 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -154,6 +154,7 @@ type shardCfg struct {
countLimit uint64
noSync bool
pageSize int
+ flushSizeLimit uint64
}
piloramaCfg struct {
@@ -278,6 +279,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
wc.sizeLimit = writeCacheCfg.SizeLimit()
wc.countLimit = writeCacheCfg.CountLimit()
wc.noSync = writeCacheCfg.NoSync()
+ wc.flushSizeLimit = writeCacheCfg.MaxFlushingObjectsSize()
}
}
@@ -865,6 +867,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
writecache.WithMaxBatchSize(wcRead.maxBatchSize),
writecache.WithMaxBatchDelay(wcRead.maxBatchDelay),
writecache.WithPageSize(wcRead.pageSize),
+ writecache.WithFlushSizeLimit(wcRead.flushSizeLimit),
writecache.WithMaxObjectSize(wcRead.maxObjSize),
writecache.WithSmallObjectSize(wcRead.smallObjectSize),
writecache.WithFlushWorkersCount(wcRead.flushWorkerCount),
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index ef6bf7f74..b952aca4c 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -79,6 +79,7 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 3221225472, wc.SizeLimit())
require.EqualValues(t, 4096, wc.BoltDB().PageSize())
require.EqualValues(t, 49, wc.CountLimit())
+ require.EqualValues(t, uint64(100), wc.MaxFlushingObjectsSize())
require.Equal(t, "tmp/0/meta", meta.Path())
require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm())
@@ -136,6 +137,7 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 4294967296, wc.SizeLimit())
require.EqualValues(t, 0, wc.BoltDB().PageSize())
require.EqualValues(t, writecacheconfig.CountLimitDefault, wc.CountLimit())
+ require.EqualValues(t, writecacheconfig.MaxFlushingObjectsSizeDefault, wc.MaxFlushingObjectsSize())
require.Equal(t, "tmp/1/meta", meta.Path())
require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm())
diff --git a/cmd/frostfs-node/config/engine/shard/writecache/config.go b/cmd/frostfs-node/config/engine/shard/writecache/config.go
index bfe8144df..5a069e99f 100644
--- a/cmd/frostfs-node/config/engine/shard/writecache/config.go
+++ b/cmd/frostfs-node/config/engine/shard/writecache/config.go
@@ -24,6 +24,8 @@ const (
// CountLimitDefault is a default write-cache count limit.
CountLimitDefault = 0
+
+ MaxFlushingObjectsSizeDefault = 128 << 20
)
// From wraps config section into Config.
@@ -145,3 +147,19 @@ func (x *Config) NoSync() bool {
func (x *Config) BoltDB() *boltdbconfig.Config {
return (*boltdbconfig.Config)(x)
}
+
+// MaxFlushingObjectsSize returns the value of "max_flushing_objects_size" config parameter.
+//
+// Returns MaxFlushingObjectsSizeDefault if the value is not a positive number.
+func (x *Config) MaxFlushingObjectsSize() uint64 {
+ s := config.SizeInBytesSafe(
+ (*config.Config)(x),
+ "max_flushing_objects_size",
+ )
+
+ if s > 0 {
+ return s
+ }
+
+ return MaxFlushingObjectsSizeDefault
+}
diff --git a/config/example/node.env b/config/example/node.env
index 82553745e..c3fa85c13 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -106,6 +106,7 @@ FROSTFS_STORAGE_SHARD_0_WRITECACHE_FLUSH_WORKER_COUNT=30
FROSTFS_STORAGE_SHARD_0_WRITECACHE_CAPACITY=3221225472
FROSTFS_STORAGE_SHARD_0_WRITECACHE_PAGE_SIZE=4096
FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_COUNT=49
+FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_FLUSHING_OBJECTS_SIZE=100
### Metabase config
FROSTFS_STORAGE_SHARD_0_METABASE_PATH=tmp/0/meta
FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644
diff --git a/config/example/node.json b/config/example/node.json
index da108c692..d7187250b 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -149,7 +149,8 @@
"flush_worker_count": 30,
"capacity": 3221225472,
"page_size": 4096,
- "max_object_count": 49
+ "max_object_count": 49,
+ "max_flushing_objects_size": 100
},
"metabase": {
"path": "tmp/0/meta",
diff --git a/config/example/node.yaml b/config/example/node.yaml
index a79f48226..776b22bd0 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -172,6 +172,7 @@ storage:
capacity: 3221225472 # approximate write-cache total size, bytes
max_object_count: 49
page_size: 4k
+ max_flushing_objects_size: 100b
metabase:
path: tmp/0/meta # metabase path
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index 5bf35cd65..c83828978 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -293,17 +293,18 @@ writecache:
page_size: '4k'
```
-| Parameter | Type | Default value | Description |
-|----------------------|------------|---------------|-------------------------------------------------------------------------------------------------------------------------------|
-| `path` | `string` | | Path to the metabase file. |
-| `capacity` | `size` | `1G` | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `max_object_count` | `int` | unrestricted | Approximate maximum objects count in the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `small_object_size` | `size` | `32K` | Maximum object size for "small" objects. This objects are stored in a key-value database instead of a file-system. |
-| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
-| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
-| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
-| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
-| `page_size` | `size` | `0` | Page size overrides the default OS page size for small objects storage. Does not affect the existing storage. |
+| Parameter | Type | Default value | Description |
+| --------------------------- | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| `path` | `string` | | Path to the metabase file. |
+| `capacity` | `size` | `1G` | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
+| `max_object_count` | `int` | unrestricted | Approximate maximum objects count in the writecache. If the writecache is full, objects are written to the blobstor directly. |
+| `small_object_size`         | `size`     | `32K`         | Maximum object size for "small" objects. These objects are stored in a key-value database instead of a file-system.             |
+| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
+| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
+| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
+| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
+| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
+| `page_size` | `size` | `0` | Page size overrides the default OS page size for small objects storage. Does not affect the existing storage. |
# `node` section
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index b298f812a..f0f10d8b5 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -68,6 +68,7 @@ func New(opts ...Option) Cache {
maxBatchSize: bbolt.DefaultMaxBatchSize,
maxBatchDelay: bbolt.DefaultMaxBatchDelay,
metrics: DefaultMetrics(),
+ flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
},
}
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index d06896ed5..5d5fc13ab 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -18,7 +18,7 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
@@ -41,19 +41,25 @@ func (c *cache) runFlushLoop(ctx context.Context) {
if c.disableBackgroundFlush {
return
}
+ fl := newFlushLimiter(c.flushSizeLimit)
c.wg.Add(1)
go func() {
defer c.wg.Done()
- c.pushToFlushQueue(ctx)
+ c.pushToFlushQueue(ctx, fl)
}()
for range c.workersCount {
c.wg.Add(1)
- go c.workerFlush(ctx)
+ go c.workerFlush(ctx, fl)
}
}
-func (c *cache) pushToFlushQueue(ctx context.Context) {
+func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
+ stopf := context.AfterFunc(ctx, func() {
+ fl.close()
+ })
+ defer stopf()
+
tick := time.NewTicker(defaultFlushInterval)
for {
select {
@@ -65,6 +71,9 @@ func (c *cache) pushToFlushQueue(ctx context.Context) {
}
err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
+ if err := fl.acquire(oi.DataSize); err != nil {
+ return err
+ }
select {
case c.flushCh <- objectInfo{
addr: oi.Address,
@@ -72,6 +81,7 @@ func (c *cache) pushToFlushQueue(ctx context.Context) {
}:
return nil
case <-ctx.Done():
+ fl.release(oi.DataSize)
return ctx.Err()
}
})
@@ -86,37 +96,42 @@ func (c *cache) pushToFlushQueue(ctx context.Context) {
}
}
-func (c *cache) workerFlush(ctx context.Context) {
+func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) {
defer c.wg.Done()
var objInfo objectInfo
for {
select {
case objInfo = <-c.flushCh:
+ c.flushIfAnObjectExistsWorker(ctx, objInfo, fl)
case <-ctx.Done():
return
}
-
- res, err := c.fsTree.Get(ctx, common.GetPrm{
- Address: objInfo.addr,
- })
- if err != nil {
- if !errors.As(err, new(*apistatus.ObjectNotFound)) {
- c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
- }
- continue
- }
-
- err = c.flushObject(ctx, res.Object, res.RawData, StorageTypeFSTree)
- if err != nil {
- // Error is handled in flushObject.
- continue
- }
-
- c.deleteFromDisk(ctx, objInfo.addr)
}
}
+func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) {
+ defer fl.release(objInfo.size)
+
+ res, err := c.fsTree.Get(ctx, common.GetPrm{
+ Address: objInfo.addr,
+ })
+ if err != nil {
+ if !client.IsErrObjectNotFound(err) {
+ c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
+ }
+ return
+ }
+
+ err = c.flushObject(ctx, res.Object, res.RawData, StorageTypeFSTree)
+ if err != nil {
+ // Error is handled in flushObject.
+ return
+ }
+
+ c.deleteFromDisk(ctx, objInfo.addr)
+}
+
func (c *cache) reportFlushError(msg string, addr string, err error) {
if c.reportError != nil {
c.reportError(msg, err)
diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go
new file mode 100644
index 000000000..ddc4101be
--- /dev/null
+++ b/pkg/local_object_storage/writecache/limiter.go
@@ -0,0 +1,70 @@
+package writecache
+
+import (
+ "errors"
+ "sync"
+)
+
+var errLimiterClosed = errors.New("acquire failed: limiter closed")
+
+// flushLimiter is used to limit the total size of objects
+// being flushed to blobstore at the same time. This is a necessary
+// limitation so that the flushing process does not have
+// a strong impact on user requests.
+type flushLimiter struct {
+ count, size uint64
+ maxSize uint64
+ cond *sync.Cond
+ closed bool
+}
+
+func newFlushLimiter(maxSize uint64) *flushLimiter {
+ return &flushLimiter{
+ maxSize: maxSize,
+ cond: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+func (l *flushLimiter) acquire(size uint64) error {
+ l.cond.L.Lock()
+ defer l.cond.L.Unlock()
+
+ // it is allowed to overflow maxSize to allow flushing objects with size > maxSize
+ for l.count > 0 && l.size+size > l.maxSize && !l.closed {
+ l.cond.Wait()
+ if l.closed {
+ return errLimiterClosed
+ }
+ }
+ l.count++
+ l.size += size
+ return nil
+}
+
+func (l *flushLimiter) release(size uint64) {
+ l.cond.L.Lock()
+ defer l.cond.L.Unlock()
+
+ if l.size >= size {
+ l.size -= size
+ } else {
+ panic("flushLimiter: invalid size")
+ }
+
+ if l.count > 0 {
+ l.count--
+ } else {
+ panic("flushLimiter: invalid count")
+ }
+
+ l.cond.Broadcast()
+}
+
+func (l *flushLimiter) close() {
+ l.cond.L.Lock()
+ defer l.cond.L.Unlock()
+
+ l.closed = true
+
+ l.cond.Broadcast()
+}
diff --git a/pkg/local_object_storage/writecache/limiter_test.go b/pkg/local_object_storage/writecache/limiter_test.go
new file mode 100644
index 000000000..db99b203a
--- /dev/null
+++ b/pkg/local_object_storage/writecache/limiter_test.go
@@ -0,0 +1,27 @@
+package writecache
+
+import (
+ "sync/atomic"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+)
+
+func TestLimiter(t *testing.T) {
+	var maxSize uint64 = 10
+	var single uint64 = 3
+	// maxSize is already uint64; no conversion needed.
+	l := newFlushLimiter(maxSize)
+	var currSize atomic.Int64
+	var eg errgroup.Group
+	for i := 0; i < 10_000; i++ {
+		eg.Go(func() error {
+			defer l.release(single)
+			defer currSize.Add(-1)
+			require.NoError(t, l.acquire(single))
+			require.True(t, currSize.Add(1) <= 3)
+			return nil
+		})
+	}
+	require.NoError(t, eg.Wait())
+}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 0643faac0..edbb3d422 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -44,6 +44,8 @@ type options struct {
disableBackgroundFlush bool
// pageSize is bbolt's page size config value
pageSize int
+ // flushSizeLimit is total size of flushing objects.
+ flushSizeLimit uint64
}
// WithLogger sets logger.
@@ -169,3 +171,10 @@ func WithPageSize(s int) Option {
o.pageSize = s
}
}
+
+// WithFlushSizeLimit sets flush size limit.
+func WithFlushSizeLimit(v uint64) Option {
+ return func(o *options) {
+ o.flushSizeLimit = v
+ }
+}
From 25d2ae8aaf22c12e9e625b0433f84a49f5f22b39 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 10 Sep 2024 13:12:17 +0300
Subject: [PATCH 103/705] [#1367] writecache: Drop BBolt related config
variables
Signed-off-by: Dmitrii Stepanov
---
.../internal/writecache/inspect.go | 2 +-
cmd/frostfs-lens/internal/writecache/list.go | 2 +-
cmd/frostfs-node/config.go | 12 -----
cmd/frostfs-node/config/engine/config_test.go | 4 --
.../config/engine/shard/writecache/config.go | 25 -----------
docs/storage-node-configuration.md | 6 ---
.../writecache/benchmark/writecache_test.go | 1 -
pkg/local_object_storage/writecache/cache.go | 21 ++++-----
pkg/local_object_storage/writecache/flush.go | 2 +-
.../writecache/flush_test.go | 13 +++---
.../writecache/options.go | 44 -------------------
pkg/local_object_storage/writecache/util.go | 3 +-
12 files changed, 17 insertions(+), 118 deletions(-)
diff --git a/cmd/frostfs-lens/internal/writecache/inspect.go b/cmd/frostfs-lens/internal/writecache/inspect.go
index 63c669a35..afc986c8b 100644
--- a/cmd/frostfs-lens/internal/writecache/inspect.go
+++ b/cmd/frostfs-lens/internal/writecache/inspect.go
@@ -25,7 +25,7 @@ func init() {
func inspectFunc(cmd *cobra.Command, _ []string) {
var data []byte
- db, err := writecache.OpenDB(vPath, true, os.OpenFile, 0)
+ db, err := writecache.OpenDB(vPath, true, os.OpenFile)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
defer db.Close()
diff --git a/cmd/frostfs-lens/internal/writecache/list.go b/cmd/frostfs-lens/internal/writecache/list.go
index 9c8fa6138..bcbae0ec9 100644
--- a/cmd/frostfs-lens/internal/writecache/list.go
+++ b/cmd/frostfs-lens/internal/writecache/list.go
@@ -31,7 +31,7 @@ func listFunc(cmd *cobra.Command, _ []string) {
return err
}
- db, err := writecache.OpenDB(vPath, true, os.OpenFile, 0)
+ db, err := writecache.OpenDB(vPath, true, os.OpenFile)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
defer db.Close()
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index ef2752872..cdfabdebd 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -145,15 +145,11 @@ type shardCfg struct {
writecacheCfg struct {
enabled bool
path string
- maxBatchSize int
- maxBatchDelay time.Duration
- smallObjectSize uint64
maxObjSize uint64
flushWorkerCount int
sizeLimit uint64
countLimit uint64
noSync bool
- pageSize int
flushSizeLimit uint64
}
@@ -270,11 +266,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
wc.enabled = true
wc.path = writeCacheCfg.Path()
- wc.maxBatchSize = writeCacheCfg.BoltDB().MaxBatchSize()
- wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
- wc.pageSize = writeCacheCfg.BoltDB().PageSize()
wc.maxObjSize = writeCacheCfg.MaxObjectSize()
- wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
wc.flushWorkerCount = writeCacheCfg.WorkerCount()
wc.sizeLimit = writeCacheCfg.SizeLimit()
wc.countLimit = writeCacheCfg.CountLimit()
@@ -864,12 +856,8 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
if wcRead := shCfg.writecacheCfg; wcRead.enabled {
writeCacheOpts = append(writeCacheOpts,
writecache.WithPath(wcRead.path),
- writecache.WithMaxBatchSize(wcRead.maxBatchSize),
- writecache.WithMaxBatchDelay(wcRead.maxBatchDelay),
- writecache.WithPageSize(wcRead.pageSize),
writecache.WithFlushSizeLimit(wcRead.flushSizeLimit),
writecache.WithMaxObjectSize(wcRead.maxObjSize),
- writecache.WithSmallObjectSize(wcRead.smallObjectSize),
writecache.WithFlushWorkersCount(wcRead.flushWorkerCount),
writecache.WithMaxCacheSize(wcRead.sizeLimit),
writecache.WithMaxCacheCount(wcRead.countLimit),
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index b952aca4c..19ad0e7ac 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -73,11 +73,9 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, wc.NoSync())
require.Equal(t, "tmp/0/cache", wc.Path())
- require.EqualValues(t, 16384, wc.SmallObjectSize())
require.EqualValues(t, 134217728, wc.MaxObjectSize())
require.EqualValues(t, 30, wc.WorkerCount())
require.EqualValues(t, 3221225472, wc.SizeLimit())
- require.EqualValues(t, 4096, wc.BoltDB().PageSize())
require.EqualValues(t, 49, wc.CountLimit())
require.EqualValues(t, uint64(100), wc.MaxFlushingObjectsSize())
@@ -131,11 +129,9 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, wc.NoSync())
require.Equal(t, "tmp/1/cache", wc.Path())
- require.EqualValues(t, 16384, wc.SmallObjectSize())
require.EqualValues(t, 134217728, wc.MaxObjectSize())
require.EqualValues(t, 30, wc.WorkerCount())
require.EqualValues(t, 4294967296, wc.SizeLimit())
- require.EqualValues(t, 0, wc.BoltDB().PageSize())
require.EqualValues(t, writecacheconfig.CountLimitDefault, wc.CountLimit())
require.EqualValues(t, writecacheconfig.MaxFlushingObjectsSizeDefault, wc.MaxFlushingObjectsSize())
diff --git a/cmd/frostfs-node/config/engine/shard/writecache/config.go b/cmd/frostfs-node/config/engine/shard/writecache/config.go
index 5a069e99f..6fff0308b 100644
--- a/cmd/frostfs-node/config/engine/shard/writecache/config.go
+++ b/cmd/frostfs-node/config/engine/shard/writecache/config.go
@@ -2,7 +2,6 @@ package writecacheconfig
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- boltdbconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/boltdb"
)
// Config is a wrapper over the config section
@@ -10,9 +9,6 @@ import (
type Config config.Config
const (
- // SmallSizeDefault is a default size of small objects.
- SmallSizeDefault = 32 << 10
-
// MaxSizeDefault is a default value of the object payload size limit.
MaxSizeDefault = 64 << 20
@@ -56,22 +52,6 @@ func (x *Config) Path() string {
return p
}
-// SmallObjectSize returns the value of "small_object_size" config parameter.
-//
-// Returns SmallSizeDefault if the value is not a positive number.
-func (x *Config) SmallObjectSize() uint64 {
- s := config.SizeInBytesSafe(
- (*config.Config)(x),
- "small_object_size",
- )
-
- if s > 0 {
- return s
- }
-
- return SmallSizeDefault
-}
-
// MaxObjectSize returns the value of "max_object_size" config parameter.
//
// Returns MaxSizeDefault if the value is not a positive number.
@@ -143,11 +123,6 @@ func (x *Config) NoSync() bool {
return config.BoolSafe((*config.Config)(x), "no_sync")
}
-// BoltDB returns config instance for querying bolt db specific parameters.
-func (x *Config) BoltDB() *boltdbconfig.Config {
- return (*boltdbconfig.Config)(x)
-}
-
// MaxFlushingObjectsSize returns the value of "max_flushing_objects_size" config parameter.
//
// Returns MaxFlushingObjectsSizeDefault if the value is not a positive number.
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index c83828978..c74695e2b 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -287,10 +287,8 @@ writecache:
enabled: true
path: /path/to/writecache
capacity: 4294967296
- small_object_size: 16384
max_object_size: 134217728
flush_worker_count: 30
- page_size: '4k'
```
| Parameter | Type | Default value | Description |
@@ -298,13 +296,9 @@ writecache:
| `path` | `string` | | Path to the metabase file. |
| `capacity` | `size` | `1G` | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
| `max_object_count` | `int` | unrestricted | Approximate maximum objects count in the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `small_object_size` | `size` | `32K` | Maximum object size for "small" objects. This objects are stored in a key-value database instead of a file-system. |
| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
-| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
-| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
-| `page_size` | `size` | `0` | Page size overrides the default OS page size for small objects storage. Does not affect the existing storage. |
# `node` section
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
index 4da9a26d7..79ab7d9c6 100644
--- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -118,6 +118,5 @@ func newCache(b *testing.B) writecache.Cache {
writecache.WithBlobstor(bs),
writecache.WithMetabase(testMetabase{}),
writecache.WithMaxCacheSize(256<<30),
- writecache.WithSmallObjectSize(128<<10),
)
}
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index f0f10d8b5..341071dc6 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -11,7 +11,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
"go.uber.org/zap"
)
@@ -42,9 +41,8 @@ type objectInfo struct {
}
const (
- defaultMaxObjectSize = 64 * 1024 * 1024 // 64 MiB
- defaultSmallObjectSize = 32 * 1024 // 32 KiB
- defaultMaxCacheSize = 1 << 30 // 1 GiB
+ defaultMaxObjectSize = 64 * 1024 * 1024 // 64 MiB
+ defaultMaxCacheSize = 1 << 30 // 1 GiB
)
var (
@@ -60,15 +58,12 @@ func New(opts ...Option) Cache {
counter: fstree.NewSimpleCounter(),
options: options{
- log: &logger.Logger{Logger: zap.NewNop()},
- maxObjectSize: defaultMaxObjectSize,
- smallObjectSize: defaultSmallObjectSize,
- workersCount: defaultFlushWorkersCount,
- maxCacheSize: defaultMaxCacheSize,
- maxBatchSize: bbolt.DefaultMaxBatchSize,
- maxBatchDelay: bbolt.DefaultMaxBatchDelay,
- metrics: DefaultMetrics(),
- flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
+ log: &logger.Logger{Logger: zap.NewNop()},
+ maxObjectSize: defaultMaxObjectSize,
+ workersCount: defaultFlushWorkersCount,
+ maxCacheSize: defaultMaxCacheSize,
+ metrics: DefaultMetrics(),
+ flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
},
}
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 5d5fc13ab..10e4d68f0 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -253,7 +253,7 @@ func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
if err != nil {
return fmt.Errorf("could not check write-cache database existence: %w", err)
}
- db, err := OpenDB(c.path, true, os.OpenFile, c.pageSize)
+ db, err := OpenDB(c.path, true, os.OpenFile)
if err != nil {
return fmt.Errorf("could not open write-cache database: %w", err)
}
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 9c7e240e0..59a4e4895 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -25,12 +25,11 @@ import (
func TestFlush(t *testing.T) {
testlogger := test.NewLogger(t)
- createCacheFn := func(t *testing.T, smallSize uint64, mb *meta.DB, bs MainStorage, opts ...Option) Cache {
+ createCacheFn := func(t *testing.T, mb *meta.DB, bs MainStorage, opts ...Option) Cache {
return New(
append([]Option{
WithLogger(testlogger),
WithPath(filepath.Join(t.TempDir(), "writecache")),
- WithSmallObjectSize(smallSize),
WithMetabase(mb),
WithBlobstor(bs),
WithDisableBackgroundFlush(),
@@ -92,7 +91,6 @@ const (
type CreateCacheFunc[Option any] func(
t *testing.T,
- smallSize uint64,
meta *meta.DB,
bs MainStorage,
opts ...Option,
@@ -115,7 +113,7 @@ func runFlushTest[Option any](
failures ...TestFailureInjector[Option],
) {
t.Run("no errors", func(t *testing.T) {
- wc, bs, mb := newCache(t, createCacheFn, smallSize)
+ wc, bs, mb := newCache(t, createCacheFn)
defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
@@ -128,7 +126,7 @@ func runFlushTest[Option any](
})
t.Run("flush on moving to degraded mode", func(t *testing.T) {
- wc, bs, mb := newCache(t, createCacheFn, smallSize)
+ wc, bs, mb := newCache(t, createCacheFn)
defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
@@ -146,7 +144,7 @@ func runFlushTest[Option any](
for _, f := range failures {
t.Run(f.Desc, func(t *testing.T) {
errCountOpt, errCount := errCountOption()
- wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt)
+ wc, bs, mb := newCache(t, createCacheFn, errCountOpt)
defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
f.InjectFn(t, wc)
@@ -168,7 +166,6 @@ func runFlushTest[Option any](
func newCache[Option any](
t *testing.T,
createCacheFn CreateCacheFunc[Option],
- smallSize uint64,
opts ...Option,
) (Cache, *blobstor.BlobStor, *meta.DB) {
dir := t.TempDir()
@@ -189,7 +186,7 @@ func newCache[Option any](
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
require.NoError(t, bs.Init())
- wc := createCacheFn(t, smallSize, mb, bs, opts...)
+ wc := createCacheFn(t, mb, bs, opts...)
require.NoError(t, wc.Open(context.Background(), mode.ReadWrite))
require.NoError(t, wc.Init())
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index edbb3d422..66ac7805c 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -1,8 +1,6 @@
package writecache
import (
- "time"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
@@ -20,8 +18,6 @@ type options struct {
metabase Metabase
// maxObjectSize is the maximum size of the object stored in the write-cache.
maxObjectSize uint64
- // smallObjectSize is the maximum size of the object stored in the database.
- smallObjectSize uint64
// workersCount is the number of workers flushing objects in parallel.
workersCount int
// maxCacheSize is the maximum total size of all objects saved in cache (DB + FS).
@@ -30,10 +26,6 @@ type options struct {
// maxCacheCount is the maximum total count of all object saved in cache.
// 0 (no limit) by default.
maxCacheCount uint64
- // maxBatchSize is the maximum batch size for the small object database.
- maxBatchSize int
- // maxBatchDelay is the maximum batch wait time for the small object database.
- maxBatchDelay time.Duration
// noSync is true iff FSTree allows unsynchronized writes.
noSync bool
// reportError is the function called when encountering disk errors in background workers.
@@ -42,8 +34,6 @@ type options struct {
metrics Metrics
// disableBackgroundFlush is for testing purposes only.
disableBackgroundFlush bool
- // pageSize is bbolt's page size config value
- pageSize int
// flushSizeLimit is total size of flushing objects.
flushSizeLimit uint64
}
@@ -85,15 +75,6 @@ func WithMaxObjectSize(sz uint64) Option {
}
}
-// WithSmallObjectSize sets maximum object size to be stored in write-cache.
-func WithSmallObjectSize(sz uint64) Option {
- return func(o *options) {
- if sz > 0 {
- o.smallObjectSize = sz
- }
- }
-}
-
func WithFlushWorkersCount(c int) Option {
return func(o *options) {
if c > 0 {
@@ -116,24 +97,6 @@ func WithMaxCacheCount(v uint64) Option {
}
}
-// WithMaxBatchSize sets max batch size for the small object database.
-func WithMaxBatchSize(sz int) Option {
- return func(o *options) {
- if sz > 0 {
- o.maxBatchSize = sz
- }
- }
-}
-
-// WithMaxBatchDelay sets max batch delay for the small object database.
-func WithMaxBatchDelay(d time.Duration) Option {
- return func(o *options) {
- if d > 0 {
- o.maxBatchDelay = d
- }
- }
-}
-
// WithNoSync sets an option to allow returning to caller on PUT before write is persisted.
// Note, that we use this flag for FSTree only and DO NOT use it for a bolt DB because
// we cannot yet properly handle the corrupted database during the startup. This SHOULD NOT
@@ -165,13 +128,6 @@ func WithDisableBackgroundFlush() Option {
}
}
-// WithPageSize sets bbolt's page size.
-func WithPageSize(s int) Option {
- return func(o *options) {
- o.pageSize = s
- }
-}
-
// WithFlushSizeLimit sets flush size limit.
func WithFlushSizeLimit(v uint64) Option {
return func(o *options) {
diff --git a/pkg/local_object_storage/writecache/util.go b/pkg/local_object_storage/writecache/util.go
index ad3b443f3..0ed4a954e 100644
--- a/pkg/local_object_storage/writecache/util.go
+++ b/pkg/local_object_storage/writecache/util.go
@@ -10,12 +10,11 @@ import (
)
// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
-func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error), pageSize int) (*bbolt.DB, error) {
+func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
NoFreelistSync: true,
ReadOnly: ro,
Timeout: 100 * time.Millisecond,
OpenFile: openFile,
- PageSize: pageSize,
})
}
From 3b236160a61c8257aae5bd7078035838606650b7 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 11 Sep 2024 09:55:08 +0300
Subject: [PATCH 104/705] [#1367] writecache: Drop DB label from metrics
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/writecache.go | 10 ++++------
pkg/local_object_storage/writecache/metrics.go | 8 ++++----
pkg/local_object_storage/writecache/state.go | 4 ++--
3 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go
index 7710bc7f4..e9ba3410f 100644
--- a/pkg/local_object_storage/engine/writecache.go
+++ b/pkg/local_object_storage/engine/writecache.go
@@ -169,18 +169,16 @@ func (m *writeCacheMetrics) Put(d time.Duration, success bool, st writecache.Sto
m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Put", success, d)
}
-func (m *writeCacheMetrics) SetEstimateSize(db, fstree uint64) {
- m.metrics.SetEstimateSize(m.shardID, m.path, writecache.StorageTypeDB.String(), db)
- m.metrics.SetEstimateSize(m.shardID, m.path, writecache.StorageTypeFSTree.String(), fstree)
+func (m *writeCacheMetrics) SetEstimateSize(size uint64) {
+ m.metrics.SetEstimateSize(m.shardID, m.path, writecache.StorageTypeFSTree.String(), size)
}
func (m *writeCacheMetrics) SetMode(mod mode.ComponentMode) {
m.metrics.SetMode(m.shardID, mod.String())
}
-func (m *writeCacheMetrics) SetActualCounters(db, fstree uint64) {
- m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeDB.String(), db)
- m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeFSTree.String(), fstree)
+func (m *writeCacheMetrics) SetActualCounters(count uint64) {
+ m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeFSTree.String(), count)
}
func (m *writeCacheMetrics) Flush(success bool, st writecache.StorageType) {
diff --git a/pkg/local_object_storage/writecache/metrics.go b/pkg/local_object_storage/writecache/metrics.go
index e68b6d8be..e3641f85e 100644
--- a/pkg/local_object_storage/writecache/metrics.go
+++ b/pkg/local_object_storage/writecache/metrics.go
@@ -26,9 +26,9 @@ type Metrics interface {
Flush(success bool, st StorageType)
Evict(st StorageType)
- SetEstimateSize(db, fstree uint64)
+ SetEstimateSize(uint64)
SetMode(m mode.ComponentMode)
- SetActualCounters(db, fstree uint64)
+ SetActualCounters(uint64)
SetPath(path string)
Close()
}
@@ -47,11 +47,11 @@ func (metricsStub) Delete(time.Duration, bool, StorageType) {}
func (metricsStub) Put(time.Duration, bool, StorageType) {}
-func (metricsStub) SetEstimateSize(uint64, uint64) {}
+func (metricsStub) SetEstimateSize(uint64) {}
func (metricsStub) SetMode(mode.ComponentMode) {}
-func (metricsStub) SetActualCounters(uint64, uint64) {}
+func (metricsStub) SetActualCounters(uint64) {}
func (metricsStub) Flush(bool, StorageType) {}
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index 748c78bcb..835686fbb 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -2,8 +2,8 @@ package writecache
func (c *cache) estimateCacheSize() (uint64, uint64) {
count, size := c.counter.CountSize()
- c.metrics.SetEstimateSize(0, size)
- c.metrics.SetActualCounters(0, count)
+ c.metrics.SetEstimateSize(size)
+ c.metrics.SetActualCounters(count)
return count, size
}
From f345fe9a581762fcd5827719d1781e16ce133056 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 11 Sep 2024 17:08:52 +0300
Subject: [PATCH 105/705] [#1367] writecache: Move DB related code to
upgrade.go
This is done to drop this file in the future.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/writecache/cache.go | 5 +-
pkg/local_object_storage/writecache/flush.go | 86 --------------
.../writecache/storage.go | 2 -
.../writecache/upgrade.go | 110 ++++++++++++++++++
pkg/local_object_storage/writecache/util.go | 20 ----
5 files changed, 111 insertions(+), 112 deletions(-)
create mode 100644 pkg/local_object_storage/writecache/upgrade.go
delete mode 100644 pkg/local_object_storage/writecache/util.go
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index 341071dc6..b97fc5856 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -45,10 +45,7 @@ const (
defaultMaxCacheSize = 1 << 30 // 1 GiB
)
-var (
- defaultBucket = []byte{0}
- dummyCanceler context.CancelFunc = func() {}
-)
+var dummyCanceler context.CancelFunc = func() {}
// New creates new writecache instance.
func New(opts ...Option) Cache {
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 10e4d68f0..83933375b 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -1,12 +1,8 @@
package writecache
import (
- "bytes"
"context"
"errors"
- "fmt"
- "os"
- "path/filepath"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -20,8 +16,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
@@ -239,83 +233,3 @@ func (c *cache) Flush(ctx context.Context, ignoreErrors, seal bool) error {
func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
return c.flushFSTree(ctx, ignoreErrors)
}
-
-type batchItem struct {
- data []byte
- address string
-}
-
-func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
- _, err := os.Stat(filepath.Join(c.path, dbName))
- if err != nil && os.IsNotExist(err) {
- return nil
- }
- if err != nil {
- return fmt.Errorf("could not check write-cache database existence: %w", err)
- }
- db, err := OpenDB(c.path, true, os.OpenFile)
- if err != nil {
- return fmt.Errorf("could not open write-cache database: %w", err)
- }
- defer func() {
- _ = db.Close()
- }()
-
- var last string
- for {
- batch, err := c.readNextDBBatch(db, last)
- if err != nil {
- return err
- }
- if len(batch) == 0 {
- break
- }
- for _, item := range batch {
- var obj objectSDK.Object
- if err := obj.Unmarshal(item.data); err != nil {
- return fmt.Errorf("unmarshal object from database: %w", err)
- }
- if err := c.flushObject(ctx, &obj, item.data, StorageTypeDB); err != nil {
- return fmt.Errorf("flush object from database: %w", err)
- }
- }
- last = batch[len(batch)-1].address
- }
- if err := db.Close(); err != nil {
- return fmt.Errorf("close write-cache database: %w", err)
- }
- if err := os.Remove(filepath.Join(c.path, dbName)); err != nil {
- return fmt.Errorf("remove write-cache database: %w", err)
- }
- return nil
-}
-
-func (c *cache) readNextDBBatch(db *bbolt.DB, last string) ([]batchItem, error) {
- const batchSize = 100
- var batch []batchItem
- err := db.View(func(tx *bbolt.Tx) error {
- var addr oid.Address
-
- b := tx.Bucket(defaultBucket)
- cs := b.Cursor()
- for k, data := cs.Seek([]byte(last)); k != nil; k, data = cs.Next() {
- sa := string(k)
- if sa == last {
- continue
- }
- if err := addr.DecodeString(sa); err != nil {
- return fmt.Errorf("decode address from database: %w", err)
- }
-
- batch = append(batch, batchItem{data: bytes.Clone(data), address: sa})
- if len(batch) == batchSize {
- return errIterationCompleted
- }
- }
- return nil
- })
- if err == nil || errors.Is(err, errIterationCompleted) {
- return batch, nil
- }
- return nil, err
-}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index e708a529e..6aface7a5 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -16,8 +16,6 @@ import (
"go.uber.org/zap"
)
-const dbName = "small.bolt"
-
func (c *cache) openStore(mod mode.ComponentMode) error {
err := util.MkdirAllX(c.path, os.ModePerm)
if err != nil {
diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go
new file mode 100644
index 000000000..3a100f1a3
--- /dev/null
+++ b/pkg/local_object_storage/writecache/upgrade.go
@@ -0,0 +1,110 @@
+package writecache
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "time"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
+)
+
+const dbName = "small.bolt"
+
+var defaultBucket = []byte{0}
+
+func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
+ _, err := os.Stat(filepath.Join(c.path, dbName))
+ if err != nil && os.IsNotExist(err) {
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("could not check write-cache database existence: %w", err)
+ }
+ db, err := OpenDB(c.path, true, os.OpenFile)
+ if err != nil {
+ return fmt.Errorf("could not open write-cache database: %w", err)
+ }
+ defer func() {
+ _ = db.Close()
+ }()
+
+ var last string
+ for {
+ batch, err := c.readNextDBBatch(db, last)
+ if err != nil {
+ return err
+ }
+ if len(batch) == 0 {
+ break
+ }
+ for _, item := range batch {
+ var obj objectSDK.Object
+ if err := obj.Unmarshal(item.data); err != nil {
+ return fmt.Errorf("unmarshal object from database: %w", err)
+ }
+ if err := c.flushObject(ctx, &obj, item.data, StorageTypeDB); err != nil {
+ return fmt.Errorf("flush object from database: %w", err)
+ }
+ }
+ last = batch[len(batch)-1].address
+ }
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("close write-cache database: %w", err)
+ }
+ if err := os.Remove(filepath.Join(c.path, dbName)); err != nil {
+ return fmt.Errorf("remove write-cache database: %w", err)
+ }
+ return nil
+}
+
+type batchItem struct {
+ data []byte
+ address string
+}
+
+func (c *cache) readNextDBBatch(db *bbolt.DB, last string) ([]batchItem, error) {
+ const batchSize = 100
+ var batch []batchItem
+ err := db.View(func(tx *bbolt.Tx) error {
+ var addr oid.Address
+
+ b := tx.Bucket(defaultBucket)
+ cs := b.Cursor()
+ for k, data := cs.Seek([]byte(last)); k != nil; k, data = cs.Next() {
+ sa := string(k)
+ if sa == last {
+ continue
+ }
+ if err := addr.DecodeString(sa); err != nil {
+ return fmt.Errorf("decode address from database: %w", err)
+ }
+
+ batch = append(batch, batchItem{data: bytes.Clone(data), address: sa})
+ if len(batch) == batchSize {
+ return errIterationCompleted
+ }
+ }
+ return nil
+ })
+ if err == nil || errors.Is(err, errIterationCompleted) {
+ return batch, nil
+ }
+ return nil, err
+}
+
+// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
+func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
+ return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
+ NoFreelistSync: true,
+ ReadOnly: ro,
+ Timeout: 100 * time.Millisecond,
+ OpenFile: openFile,
+ })
+}
diff --git a/pkg/local_object_storage/writecache/util.go b/pkg/local_object_storage/writecache/util.go
deleted file mode 100644
index 0ed4a954e..000000000
--- a/pkg/local_object_storage/writecache/util.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package writecache
-
-import (
- "io/fs"
- "os"
- "path/filepath"
- "time"
-
- "go.etcd.io/bbolt"
-)
-
-// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
-func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
- return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
- NoFreelistSync: true,
- ReadOnly: ro,
- Timeout: 100 * time.Millisecond,
- OpenFile: openFile,
- })
-}
From b33559754df994c4e2e37f1b5b6c8f29ac8f97f1 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 12 Sep 2024 12:33:12 +0300
Subject: [PATCH 106/705] [#1367] fstree: Add size hint for Delete
This allows skipping the `os.Stat` call if the caller already knows the data size.
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/common/delete.go | 1 +
.../blobstor/fstree/fstree.go | 2 +-
.../blobstor/fstree/fstree_test.go | 84 +++++++++++++------
.../blobstor/fstree/fstree_write_generic.go | 19 +++--
.../blobstor/fstree/fstree_write_linux.go | 25 +++---
pkg/local_object_storage/writecache/flush.go | 4 +-
.../writecache/storage.go | 4 +-
7 files changed, 92 insertions(+), 47 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/common/delete.go b/pkg/local_object_storage/blobstor/common/delete.go
index 1b04eab1a..c19e099cb 100644
--- a/pkg/local_object_storage/blobstor/common/delete.go
+++ b/pkg/local_object_storage/blobstor/common/delete.go
@@ -8,6 +8,7 @@ import (
type DeletePrm struct {
Address oid.Address
StorageID []byte
+ Size uint64
}
// DeleteRes groups the resulting values of Delete operation.
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 1c60ec340..057796db2 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -338,7 +338,7 @@ func (t *FSTree) Delete(ctx context.Context, prm common.DeletePrm) (common.Delet
}
p := t.treePath(prm.Address)
- err = t.writer.removeFile(p)
+ err = t.writer.removeFile(p, prm.Size)
return common.DeleteRes{}, err
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
index f39c7296e..eb2126b6c 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
@@ -68,34 +68,70 @@ func TestObjectCounter(t *testing.T) {
var delPrm common.DeletePrm
delPrm.Address = addr
- eg, egCtx := errgroup.WithContext(context.Background())
+ t.Run("without size hint", func(t *testing.T) {
+ eg, egCtx := errgroup.WithContext(context.Background())
- eg.Go(func() error {
- for range 1_000 {
- _, err := fst.Put(egCtx, putPrm)
- if err != nil {
- return err
+ eg.Go(func() error {
+ for range 1_000 {
+ _, err := fst.Put(egCtx, putPrm)
+ if err != nil {
+ return err
+ }
}
- }
- return nil
+ return nil
+ })
+
+ eg.Go(func() error {
+ var le logicerr.Logical
+ for range 1_000 {
+ _, err := fst.Delete(egCtx, delPrm)
+ if err != nil && !errors.As(err, &le) {
+ return err
+ }
+ }
+ return nil
+ })
+
+ require.NoError(t, eg.Wait())
+
+ count, size = counter.CountSize()
+ realCount, realSize, err := fst.countFiles()
+ require.NoError(t, err)
+ require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
+ require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
})
- eg.Go(func() error {
- var le logicerr.Logical
- for range 1_000 {
- _, err := fst.Delete(egCtx, delPrm)
- if err != nil && !errors.As(err, &le) {
- return err
+ t.Run("with size hint", func(t *testing.T) {
+ delPrm.Size = uint64(len(putPrm.RawData))
+ eg, egCtx := errgroup.WithContext(context.Background())
+
+ eg.Go(func() error {
+ for range 1_000 {
+ _, err := fst.Put(egCtx, putPrm)
+ if err != nil {
+ return err
+ }
}
- }
- return nil
+ return nil
+ })
+
+ eg.Go(func() error {
+ var le logicerr.Logical
+ for range 1_000 {
+ _, err := fst.Delete(egCtx, delPrm)
+ if err != nil && !errors.As(err, &le) {
+ return err
+ }
+ }
+ return nil
+ })
+
+ require.NoError(t, eg.Wait())
+
+ count, size = counter.CountSize()
+ realCount, realSize, err := fst.countFiles()
+ require.NoError(t, err)
+ require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
+ require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
})
-
- require.NoError(t, eg.Wait())
-
- count, size = counter.CountSize()
- realCount, realSize, err := fst.countFiles()
- require.NoError(t, err)
- require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
- require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
index 801fc4a22..4110ba7d7 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
@@ -16,7 +16,7 @@ import (
type writer interface {
writeData(string, []byte) error
- removeFile(string) error
+ removeFile(string, uint64) error
}
type genericWriter struct {
@@ -107,10 +107,10 @@ func (w *genericWriter) writeFile(p string, data []byte) error {
return err
}
-func (w *genericWriter) removeFile(p string) error {
+func (w *genericWriter) removeFile(p string, size uint64) error {
var err error
if w.fileCounterEnabled {
- err = w.removeWithCounter(p)
+ err = w.removeWithCounter(p, size)
} else {
err = os.Remove(p)
}
@@ -121,18 +121,21 @@ func (w *genericWriter) removeFile(p string) error {
return err
}
-func (w *genericWriter) removeWithCounter(p string) error {
+func (w *genericWriter) removeWithCounter(p string, size uint64) error {
w.fileGuard.Lock(p)
defer w.fileGuard.Unlock(p)
- stat, err := os.Stat(p)
- if err != nil {
- return err
+ if size == 0 {
+ stat, err := os.Stat(p)
+ if err != nil {
+ return err
+ }
+ size = uint64(stat.Size())
}
if err := os.Remove(p); err != nil {
return err
}
- w.fileCounter.Dec(uint64(stat.Size()))
+ w.fileCounter.Dec(uint64(size))
return nil
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
index 3127579ac..3561c616b 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
@@ -91,25 +91,30 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
return errClose
}
-func (w *linuxWriter) removeFile(p string) error {
+func (w *linuxWriter) removeFile(p string, size uint64) error {
if w.fileCounterEnabled {
w.fileGuard.Lock(p)
defer w.fileGuard.Unlock(p)
- }
- var stat unix.Stat_t
- err := unix.Stat(p, &stat)
- if err != nil {
- if err == unix.ENOENT {
- return logicerr.Wrap(new(apistatus.ObjectNotFound))
+
+ if size == 0 {
+ var stat unix.Stat_t
+ err := unix.Stat(p, &stat)
+ if err != nil {
+ if err == unix.ENOENT {
+ return logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+ return err
+ }
+ size = uint64(stat.Size)
}
- return err
}
- err = unix.Unlink(p)
+
+ err := unix.Unlink(p)
if err != nil && err == unix.ENOENT {
return logicerr.Wrap(new(apistatus.ObjectNotFound))
}
if err == nil {
- w.fileCounter.Dec(uint64(stat.Size))
+ w.fileCounter.Dec(uint64(size))
}
return err
}
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 83933375b..bfa6aacb0 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -123,7 +123,7 @@ func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectI
return
}
- c.deleteFromDisk(ctx, objInfo.addr)
+ c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData)))
}
func (c *cache) reportFlushError(msg string, addr string, err error) {
@@ -157,7 +157,7 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
return err
}
- c.deleteFromDisk(ctx, e.Address)
+ c.deleteFromDisk(ctx, e.Address, uint64(len(e.ObjectData)))
return nil
}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index 6aface7a5..2e52e5b20 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -40,8 +40,8 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
return nil
}
-func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address) {
- _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
+func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) {
+ _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size})
if err != nil && !client.IsErrObjectNotFound(err) {
c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
} else if err == nil {
From e3764c51df237289431eacd0051bb3754e7682cc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 12 Sep 2024 09:53:21 +0300
Subject: [PATCH 107/705] [#1347] metabase: Fix EC search
For EC chunks, we need to return the EC parent object ID, since
EC chunks don't have their own attributes but inherit the parent's.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/select.go | 21 +++++++++------
.../metabase/select_test.go | 27 +++++++++++++++++++
2 files changed, 40 insertions(+), 8 deletions(-)
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 1863fc25e..ed43fc41f 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -150,7 +150,8 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
continue // ignore removed objects
}
- if !db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch) {
+ addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch)
+ if !match {
continue // ignore objects with unmatched slow filters
}
@@ -382,15 +383,16 @@ func (db *DB) selectObjectID(
}
// matchSlowFilters return true if object header is matched by all slow filters.
-func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) bool {
+func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
+ result := addr
if len(f) == 0 {
- return true
+ return result, true
}
buf := make([]byte, addressKeySize)
obj, err := db.get(tx, addr, buf, true, false, currEpoch)
if err != nil {
- return false
+ return result, false
}
for i := range f {
@@ -415,23 +417,26 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
default: // user attribute
v, ok := attributeValue(obj, f[i].Header())
if ok {
+ if ech := obj.ECHeader(); ech != nil {
+ result.SetObject(ech.Parent())
+ }
data = []byte(v)
} else {
- return f[i].Operation() == objectSDK.MatchNotPresent
+ return result, f[i].Operation() == objectSDK.MatchNotPresent
}
}
matchFunc, ok := db.matchers[f[i].Operation()]
if !ok {
- return false
+ return result, false
}
if !matchFunc.matchSlow(f[i].Header(), data, f[i].Value()) {
- return false
+ return result, false
}
}
- return true
+ return result, true
}
func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 0fab3a108..6469bbdbc 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -70,6 +70,22 @@ func TestDB_SelectUserAttributes(t *testing.T) {
err = putBig(db, raw6)
require.NoError(t, err)
+ raw7 := testutil.GenerateObjectWithCID(cnr)
+ var attr objectSDK.Attribute
+ attr.SetKey("path")
+ attr.SetValue("test/3/4")
+ attrs := raw7.Attributes()
+ attrs = append(attrs, attr)
+ ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{
+ ID: oidtest.ID(),
+ Attributes: attrs,
+ }, 0, 3, []byte{}, 0)
+ raw7.SetECHeader(ech)
+ require.NoError(t, putBig(db, raw7))
+ var raw7Parent oid.Address
+ raw7Parent.SetContainer(cnr)
+ raw7Parent.SetObject(ech.Parent())
+
fs := objectSDK.SearchFilters{}
fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual)
testSelect(t, db, cnr, fs,
@@ -100,6 +116,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
@@ -110,6 +127,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
@@ -120,6 +138,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
@@ -131,6 +150,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
@@ -139,6 +159,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ raw7Parent,
)
fs = objectSDK.SearchFilters{}
@@ -147,6 +168,12 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
)
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddFilter("path", "test/3/4", objectSDK.MatchStringEqual)
+ testSelect(t, db, cnr, fs,
+ raw7Parent,
+ )
}
func TestDB_SelectRootPhyParent(t *testing.T) {
From 546d09660f52d262c3c053b72457170f5c4b68e7 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 12 Sep 2024 17:05:14 +0300
Subject: [PATCH 108/705] [#1283] Clear systemd-notify status on exit
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-ir/main.go | 5 +++++
cmd/frostfs-node/config.go | 4 ++++
pkg/util/sdnotify/sdnotify.go | 5 +++++
3 files changed, 14 insertions(+)
diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go
index 5b852c37c..02936ae78 100644
--- a/cmd/frostfs-ir/main.go
+++ b/cmd/frostfs-ir/main.go
@@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
"github.com/spf13/viper"
"go.uber.org/zap"
)
@@ -124,4 +125,8 @@ func shutdown() {
zap.String("error", err.Error()),
)
}
+
+ if err := sdnotify.ClearStatus(); err != nil {
+ log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+ }
}
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index cdfabdebd..f98f1c1a3 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1398,4 +1398,8 @@ func (c *cfg) shutdown() {
for i := range c.closers {
c.closers[len(c.closers)-1-i].fn()
}
+
+ if err := sdnotify.ClearStatus(); err != nil {
+ c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+ }
}
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
index 5235315cc..e94ff77ad 100644
--- a/pkg/util/sdnotify/sdnotify.go
+++ b/pkg/util/sdnotify/sdnotify.go
@@ -69,6 +69,11 @@ func Status(status string) error {
return Send("STATUS=" + status)
}
+// ClearStatus resets the current service status previously set by Status.
+func ClearStatus() error {
+ return Status("")
+}
+
// Send state through the notify socket if any.
// If the notify socket was not detected, it returns an error.
func Send(state string) error {
From bb448674918330ddd15df6462fbde860eca5bc64 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 13 Sep 2024 11:31:33 +0300
Subject: [PATCH 109/705] [#1374] go.mod: Upgrade grpc to v1.66.2
Signed-off-by: Dmitrii Stepanov
---
go.mod | 14 +++++++-------
go.sum | 28 ++++++++++++++--------------
2 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/go.mod b/go.mod
index 93eef5b8c..78fefc9ae 100644
--- a/go.mod
+++ b/go.mod
@@ -46,9 +46,9 @@ require (
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
- golang.org/x/term v0.18.0
- google.golang.org/grpc v1.63.2
- google.golang.org/protobuf v1.33.0
+ golang.org/x/term v0.21.0
+ google.golang.org/grpc v1.66.2
+ google.golang.org/protobuf v1.34.1
gopkg.in/yaml.v3 v3.0.1
)
@@ -122,11 +122,11 @@ require (
go.opentelemetry.io/otel/sdk v1.22.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.21.0 // indirect
- golang.org/x/net v0.23.0 // indirect
+ golang.org/x/crypto v0.24.0 // indirect
+ golang.org/x/net v0.26.0 // indirect
golang.org/x/text v0.16.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
diff --git a/go.sum b/go.sum
index 102501484..2147f8988 100644
--- a/go.sum
+++ b/go.sum
@@ -306,8 +306,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -327,8 +327,8 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
-golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -370,8 +370,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -393,12 +393,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
-google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
-google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
-google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
+google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -407,8 +407,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
From 944160427bad682df038f75b94cfad1e3e23aa41 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 13 Sep 2024 11:32:03 +0300
Subject: [PATCH 110/705] [#1374] cli: Drop deprecated grpc connection method
For `frostfs-cli` it is OK to use a gRPC client without blocking on
connection establishment, as `frostfs-cli` will perform an RPC call anyway.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/tree/add.go | 5 +++--
cmd/frostfs-cli/modules/tree/add_by_path.go | 5 +++--
cmd/frostfs-cli/modules/tree/client.go | 22 +++++++++++----------
cmd/frostfs-cli/modules/tree/get_by_path.go | 5 +++--
cmd/frostfs-cli/modules/tree/get_op_log.go | 5 +++--
cmd/frostfs-cli/modules/tree/healthcheck.go | 5 +++--
cmd/frostfs-cli/modules/tree/list.go | 5 +++--
cmd/frostfs-cli/modules/tree/move.go | 5 +++--
cmd/frostfs-cli/modules/tree/remove.go | 5 +++--
cmd/frostfs-cli/modules/tree/subtree.go | 5 +++--
10 files changed, 39 insertions(+), 28 deletions(-)
diff --git a/cmd/frostfs-cli/modules/tree/add.go b/cmd/frostfs-cli/modules/tree/add.go
index 0b8dc292f..019feb0ec 100644
--- a/cmd/frostfs-cli/modules/tree/add.go
+++ b/cmd/frostfs-cli/modules/tree/add.go
@@ -47,9 +47,10 @@ func add(cmd *cobra.Command, _ []string) {
meta, err := parseMeta(cmd)
commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/add_by_path.go b/cmd/frostfs-cli/modules/tree/add_by_path.go
index ea815dbfe..5d5b00b7d 100644
--- a/cmd/frostfs-cli/modules/tree/add_by_path.go
+++ b/cmd/frostfs-cli/modules/tree/add_by_path.go
@@ -50,9 +50,10 @@ func addByPath(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go
index 4f4f54657..4e0099f02 100644
--- a/cmd/frostfs-cli/modules/tree/client.go
+++ b/cmd/frostfs-cli/modules/tree/client.go
@@ -3,13 +3,14 @@ package tree
import (
"context"
"strings"
- "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+ "github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
@@ -17,7 +18,7 @@ import (
// _client returns grpc Tree service client. Should be removed
// after making Tree API public.
-func _client(ctx context.Context) (tree.TreeServiceClient, error) {
+func _client() (tree.TreeServiceClient, error) {
var netAddr network.Address
err := netAddr.FromString(viper.GetString(commonflags.RPC))
if err != nil {
@@ -25,7 +26,6 @@ func _client(ctx context.Context) (tree.TreeServiceClient, error) {
}
opts := []grpc.DialOption{
- grpc.WithBlock(),
grpc.WithChainUnaryInterceptor(
metrics.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
@@ -40,12 +40,14 @@ func _client(ctx context.Context) (tree.TreeServiceClient, error) {
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
- // a default connection establishing timeout
- const defaultClientConnectTimeout = time.Second * 2
-
- ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
- cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...)
- cancel()
-
+ cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
return tree.NewTreeServiceClient(cc), err
}
+
+func contextWithTimeout(cmd *cobra.Command) (context.Context, context.CancelFunc) {
+ if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 {
+ common.PrintVerbose(cmd, "Set request timeout to %s.", timeout)
+ return context.WithTimeout(cmd.Context(), timeout)
+ }
+ return context.WithTimeout(cmd.Context(), commonflags.TimeoutDefault)
+}
diff --git a/cmd/frostfs-cli/modules/tree/get_by_path.go b/cmd/frostfs-cli/modules/tree/get_by_path.go
index f239066cd..7061723fd 100644
--- a/cmd/frostfs-cli/modules/tree/get_by_path.go
+++ b/cmd/frostfs-cli/modules/tree/get_by_path.go
@@ -50,9 +50,10 @@ func getByPath(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/get_op_log.go b/cmd/frostfs-cli/modules/tree/get_op_log.go
index b1b307f62..376aa8e8d 100644
--- a/cmd/frostfs-cli/modules/tree/get_op_log.go
+++ b/cmd/frostfs-cli/modules/tree/get_op_log.go
@@ -44,9 +44,10 @@ func getOpLog(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/healthcheck.go b/cmd/frostfs-cli/modules/tree/healthcheck.go
index f0506467e..b01bb2e77 100644
--- a/cmd/frostfs-cli/modules/tree/healthcheck.go
+++ b/cmd/frostfs-cli/modules/tree/healthcheck.go
@@ -26,9 +26,10 @@ func initHealthcheckCmd() {
func healthcheck(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
req := &tree.HealthcheckRequest{
diff --git a/cmd/frostfs-cli/modules/tree/list.go b/cmd/frostfs-cli/modules/tree/list.go
index a25d066d5..f8c0e490f 100644
--- a/cmd/frostfs-cli/modules/tree/list.go
+++ b/cmd/frostfs-cli/modules/tree/list.go
@@ -38,9 +38,10 @@ func list(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/move.go b/cmd/frostfs-cli/modules/tree/move.go
index 24abbd650..dc807d752 100644
--- a/cmd/frostfs-cli/modules/tree/move.go
+++ b/cmd/frostfs-cli/modules/tree/move.go
@@ -45,9 +45,10 @@ func move(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/remove.go b/cmd/frostfs-cli/modules/tree/remove.go
index 74e9d9749..d0b6fab2f 100644
--- a/cmd/frostfs-cli/modules/tree/remove.go
+++ b/cmd/frostfs-cli/modules/tree/remove.go
@@ -41,9 +41,10 @@ func remove(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/subtree.go b/cmd/frostfs-cli/modules/tree/subtree.go
index e88ef79cb..83a8909b6 100644
--- a/cmd/frostfs-cli/modules/tree/subtree.go
+++ b/cmd/frostfs-cli/modules/tree/subtree.go
@@ -46,9 +46,10 @@ func getSubTree(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
From 54fe8383a4f274da8f1fd553097f3268c9f2d13a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 13 Sep 2024 11:39:25 +0300
Subject: [PATCH 111/705] [#1374] tree: Use NewClient to create grpc connection
for sync
The created connection will be used to sync trees, so it is OK to defer
dialing until the first RPC call.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/tree/sync.go | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index be22074a5..5bbc93978 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -294,7 +294,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
return false
}
- cc, err := s.dialCtx(egCtx, a)
+ cc, err := s.createConnection(a)
if err != nil {
s.log.Warn(logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
return false
@@ -332,8 +332,8 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
return from
}
-func (*Service) dialCtx(egCtx context.Context, a network.Address) (*grpc.ClientConn, error) {
- return grpc.DialContext(egCtx, a.URIAddr(),
+func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) {
+ return grpc.NewClient(a.URIAddr(),
grpc.WithChainUnaryInterceptor(
metrics.NewUnaryClientInterceptor(),
tracing_grpc.NewUnaryClientInteceptor(),
From 89d0435b1d59257d5bf15c926465193e53c11922 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 13 Sep 2024 11:59:17 +0300
Subject: [PATCH 112/705] [#1374] tree: Use NewClient to create grpc connection
in cache
The created gRPC connection should be established, so perform a Healthcheck
request to verify that the connection is OK.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/tree/cache.go | 33 +++++++++++++++++++++++++--------
pkg/services/tree/service.go | 2 +-
2 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index 1be1c2f83..38501b852 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -2,6 +2,7 @@ package tree
import (
"context"
+ "crypto/ecdsa"
"errors"
"fmt"
"sync"
@@ -19,6 +20,7 @@ import (
type clientCache struct {
sync.Mutex
simplelru.LRU[string, cacheItem]
+ key *ecdsa.PrivateKey
}
type cacheItem struct {
@@ -34,13 +36,14 @@ const (
var errRecentlyFailed = errors.New("client has recently failed")
-func (c *clientCache) init() {
+func (c *clientCache) init(pk *ecdsa.PrivateKey) {
l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) {
if conn := value.cc; conn != nil {
_ = conn.Close()
}
})
c.LRU = *l
+ c.key = pk
}
func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
@@ -63,7 +66,7 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
}
}
- cc, err := dialTreeService(ctx, netmapAddr)
+ cc, err := c.dialTreeService(ctx, netmapAddr)
lastTry := time.Now()
c.Lock()
@@ -81,14 +84,13 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
return NewTreeServiceClient(cc), nil
}
-func dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) {
+func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) {
var netAddr network.Address
if err := netAddr.FromString(netmapAddr); err != nil {
return nil, err
}
opts := []grpc.DialOption{
- grpc.WithBlock(),
grpc.WithChainUnaryInterceptor(
metrics.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
@@ -103,9 +105,24 @@ func dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn,
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
- ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
- cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...)
- cancel()
+ req := &HealthcheckRequest{
+ Body: &HealthcheckRequest_Body{},
+ }
+ if err := SignMessage(req, c.key); err != nil {
+ return nil, err
+ }
- return cc, err
+ cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
+ defer cancel()
+ // perform some request to check connection
+ if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
+ _ = cc.Close()
+ return nil, err
+ }
+ return cc, nil
}
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 875e47ecb..60bb1a6ad 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -65,7 +65,7 @@ func New(opts ...Option) *Service {
s.log = &logger.Logger{Logger: zap.NewNop()}
}
- s.cache.init()
+ s.cache.init(s.key)
s.closeCh = make(chan struct{})
s.replicateCh = make(chan movePair, s.replicatorChannelCapacity)
s.replicateLocalCh = make(chan applyOp)
From 2be1aa781dbbc2462ca717f3b763053fb8cd2810 Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Fri, 13 Sep 2024 15:44:23 +0300
Subject: [PATCH 113/705] [#1266] .forgejo: Make 'fumpt' job fail on changed
files
`gofumpt` always returns an exit code of 0, even when it finds
misformatted files. To make `fumpt` action behave as expected
we need to check if `gofumpt` changed any files.
Signed-off-by: Ekaterina Lebedeva
---
.forgejo/workflows/tests.yml | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml
index a908c6278..07ba5c268 100644
--- a/.forgejo/workflows/tests.yml
+++ b/.forgejo/workflows/tests.yml
@@ -106,4 +106,6 @@ jobs:
run: make fumpt-install
- name: Run gofumpt
- run: make fumpt
+ run: |
+ make fumpt
+ git diff --exit-code --quiet
From 74a6a1da7fb6a895583d72f188f31b8f067ce513 Mon Sep 17 00:00:00 2001
From: Aleksey Savaitan
Date: Thu, 12 Sep 2024 17:40:10 +0300
Subject: [PATCH 114/705] [#1361] add root ca cert for telemetry configuration
Signed-off-by: Aleksey Savaitan
---
cmd/frostfs-node/config.go | 6 ++-
cmd/frostfs-node/config/tracing/config.go | 23 +++++++-
cmd/frostfs-node/tracing.go | 10 ++--
config/example/node.env | 1 +
config/example/node.json | 3 +-
config/example/node.yaml | 1 +
go.mod | 30 +++++------
go.sum | 64 +++++++++++------------
8 files changed, 84 insertions(+), 54 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index f98f1c1a3..bb6580a40 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1299,7 +1299,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
}})
components = append(components, dCmp{"pools", c.reloadPools})
components = append(components, dCmp{"tracing", func() error {
- updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg))
+ traceConfig, err := tracingconfig.ToTracingConfig(c.appCfg)
+ if err != nil {
+ return err
+ }
+ updated, err := tracing.Setup(ctx, *traceConfig)
if updated {
c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
}
diff --git a/cmd/frostfs-node/config/tracing/config.go b/cmd/frostfs-node/config/tracing/config.go
index e846be158..8544c672c 100644
--- a/cmd/frostfs-node/config/tracing/config.go
+++ b/cmd/frostfs-node/config/tracing/config.go
@@ -1,6 +1,11 @@
package tracing
import (
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "os"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -11,8 +16,8 @@ const (
)
// ToTracingConfig extracts tracing config.
-func ToTracingConfig(c *config.Config) *tracing.Config {
- return &tracing.Config{
+func ToTracingConfig(c *config.Config) (*tracing.Config, error) {
+ conf := &tracing.Config{
Enabled: config.BoolSafe(c.Sub(subsection), "enabled"),
Exporter: tracing.Exporter(config.StringSafe(c.Sub(subsection), "exporter")),
Endpoint: config.StringSafe(c.Sub(subsection), "endpoint"),
@@ -20,6 +25,20 @@ func ToTracingConfig(c *config.Config) *tracing.Config {
InstanceID: getInstanceIDOrDefault(c),
Version: misc.Version,
}
+
+ if trustedCa := config.StringSafe(c.Sub(subsection), "trusted_ca"); trustedCa != "" {
+ caBytes, err := os.ReadFile(trustedCa)
+ if err != nil {
+ return nil, fmt.Errorf("cannot read trusted ca cert by path: %w", err)
+ }
+ certPool := x509.NewCertPool()
+ ok := certPool.AppendCertsFromPEM(caBytes)
+ if !ok {
+ return nil, errors.New("can't fill cert pool by ca cert")
+ }
+ conf.ServerCaCertPool = certPool
+ }
+ return conf, nil
}
func getInstanceIDOrDefault(c *config.Config) string {
diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go
index 675c31374..f550dd882 100644
--- a/cmd/frostfs-node/tracing.go
+++ b/cmd/frostfs-node/tracing.go
@@ -11,11 +11,15 @@ import (
)
func initTracing(ctx context.Context, c *cfg) {
- conf := tracingconfig.ToTracingConfig(c.appCfg)
-
- _, err := tracing.Setup(ctx, *conf)
+ conf, err := tracingconfig.ToTracingConfig(c.appCfg)
if err != nil {
c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+ return
+ }
+ _, err = tracing.Setup(ctx, *conf)
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+ return
}
c.closers = append(c.closers, closer{
diff --git a/config/example/node.env b/config/example/node.env
index c3fa85c13..030a79934 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -199,6 +199,7 @@ FROSTFS_STORAGE_SHARD_1_GC_REMOVER_SLEEP_INTERVAL=5m
FROSTFS_TRACING_ENABLED=true
FROSTFS_TRACING_ENDPOINT="localhost"
FROSTFS_TRACING_EXPORTER="otlp_grpc"
+FROSTFS_TRACING_TRUSTED_CA=""
FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
diff --git a/config/example/node.json b/config/example/node.json
index d7187250b..4e6d239fe 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -255,7 +255,8 @@
"tracing": {
"enabled": true,
"endpoint": "localhost:9090",
- "exporter": "otlp_grpc"
+ "exporter": "otlp_grpc",
+ "trusted_ca": "/etc/ssl/tracing.pem"
},
"runtime": {
"soft_memory_limit": 1073741824
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 776b22bd0..5a8e6a2a4 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -231,6 +231,7 @@ tracing:
enabled: true
exporter: "otlp_grpc"
endpoint: "localhost"
+ trusted_ca: ""
runtime:
soft_memory_limit: 1gb
diff --git a/go.mod b/go.mod
index 78fefc9ae..621d2e85d 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
- git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
+ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
@@ -40,15 +40,15 @@ require (
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
go.etcd.io/bbolt v1.3.10
- go.opentelemetry.io/otel v1.24.0
- go.opentelemetry.io/otel/trace v1.24.0
+ go.opentelemetry.io/otel v1.28.0
+ go.opentelemetry.io/otel/trace v1.28.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
golang.org/x/term v0.21.0
google.golang.org/grpc v1.66.2
- google.golang.org/protobuf v1.34.1
+ google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v3 v3.0.1
)
@@ -63,7 +63,7 @@ require (
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
- github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect
@@ -73,13 +73,13 @@ require (
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/gdamore/encoding v1.0.0 // indirect
github.com/go-fed/httpsig v1.1.0 // indirect
- github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
@@ -115,18 +115,18 @@ require (
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/twmb/murmur3 v1.1.8 // indirect
github.com/urfave/cli v1.22.14 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 // indirect
- go.opentelemetry.io/otel/metric v1.24.0 // indirect
- go.opentelemetry.io/otel/sdk v1.22.0 // indirect
- go.opentelemetry.io/proto/otlp v1.1.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
+ go.opentelemetry.io/otel/metric v1.28.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.28.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/text v0.16.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
diff --git a/go.sum b/go.sum
index 2147f8988..4d21d9bca 100644
--- a/go.sum
+++ b/go.sum
@@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSV
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad h1:cJGK/bXSF/0KMr6zkIy06qekQhQRU7eYzh+lWdQ0U8w=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad/go.mod h1:I4GzeEnftO++N2JHQn9QR88eV0rjQkGqlUCNMnCz1CY=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
@@ -33,8 +33,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo=
@@ -82,8 +82,8 @@ github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7
github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-pkgz/expirable-cache/v3 v3.0.0 h1:u3/gcu3sabLYiTCevoRKv+WzjIn5oo7P8XtiXBeRDLw=
@@ -116,8 +116,8 @@ github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 h1:f4tg
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0/go.mod h1:hKAkSgNkL0FII46ZkJcpVEAai4KV+swlIWCKfekd1pA=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 h1:HcUWd006luQPljE73d5sk+/VgYPGUReEVz2y1/qylwY=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1/go.mod h1:w9Y7gY31krpLmrVU5ZPG9H7l9fZuRu5/3R3S3FMtVQ4=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
@@ -229,8 +229,8 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
@@ -278,22 +278,22 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
-go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
-go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 h1:zr8ymM5OWWjjiWRzwTfZ67c905+2TMHYp2lMJ52QTyM=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0/go.mod h1:sQs7FT2iLVJ+67vYngGJkPe1qr39IzaBzaj9IDNNY8k=
-go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
-go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
-go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
-go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
-go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
+go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
+go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
+go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
+go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -393,10 +393,10 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU=
-google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -407,8 +407,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
From 96308a26c6ab73c416464e3da2f0e859915abc93 Mon Sep 17 00:00:00 2001
From: Aleksey Savaitan
Date: Thu, 12 Sep 2024 17:42:20 +0300
Subject: [PATCH 115/705] [#1361] linter: fix funlen
Signed-off-by: Aleksey Savaitan
---
cmd/frostfs-node/config.go | 55 +++++++++++++++++++++-----------------
1 file changed, 31 insertions(+), 24 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index bb6580a40..c0019d36a 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1278,7 +1278,6 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// all the components are expected to support
// Logger's dynamic reconfiguration approach
- var components []dCmp
// Logger
@@ -1288,6 +1287,36 @@ func (c *cfg) reloadConfig(ctx context.Context) {
return
}
+ components := c.getComponents(ctx, logPrm)
+
+ // Storage Engine
+
+ var rcfg engine.ReConfiguration
+ for _, optsWithID := range c.shardOpts(ctx) {
+ rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource())))
+ }
+
+ err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
+ return
+ }
+
+ for _, component := range components {
+ err = component.reloadFunc()
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
+ zap.String("component", component.name),
+ zap.Error(err))
+ }
+ }
+
+ c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+}
+
+func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
+ var components []dCmp
+
components = append(components, dCmp{"logger", logPrm.Reload})
components = append(components, dCmp{"runtime", func() error {
setRuntimeParameters(c)
@@ -1321,29 +1350,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
}
- // Storage Engine
-
- var rcfg engine.ReConfiguration
- for _, optsWithID := range c.shardOpts(ctx) {
- rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource())))
- }
-
- err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
- if err != nil {
- c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
- return
- }
-
- for _, component := range components {
- err = component.reloadFunc()
- if err != nil {
- c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
- zap.String("component", component.name),
- zap.Error(err))
- }
- }
-
- c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+ return components
}
func (c *cfg) reloadPools() error {
From ea48e928c8d6c54662c72bd650b57a8b897e7879 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 16 Sep 2024 10:45:26 +0300
Subject: [PATCH 116/705] [#1366] logger: Make timestamp prepending optional
Signed-off-by: Aleksey Savchuk
---
pkg/util/logger/logger.go | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go
index e67afb36b..4b60f02de 100644
--- a/pkg/util/logger/logger.go
+++ b/pkg/util/logger/logger.go
@@ -40,6 +40,9 @@ type Prm struct {
// do not support runtime rereading
dest string
+
+ // PrependTimestamp specifies whether to prepend a timestamp in the log
+ PrependTimestamp bool
}
const (
@@ -116,11 +119,16 @@ func newConsoleLogger(prm *Prm) (*Logger, error) {
c := zap.NewProductionConfig()
c.Level = lvl
c.Encoding = "console"
- c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
}
+ if prm.PrependTimestamp {
+ c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ } else {
+ c.EncoderConfig.TimeKey = ""
+ }
+
lZap, err := c.Build(
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
)
@@ -140,11 +148,16 @@ func newJournaldLogger(prm *Prm) (*Logger, error) {
c := zap.NewProductionConfig()
c.Level = lvl
c.Encoding = "console"
- c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
}
+ if prm.PrependTimestamp {
+ c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ } else {
+ c.EncoderConfig.TimeKey = ""
+ }
+
encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
From d4bec24c9fa7ede0fcc4d5b879316f0a54c61bb9 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 16 Sep 2024 10:46:26 +0300
Subject: [PATCH 117/705] [#1366] node, ir: Support `timestamp` config option,
update tests
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-ir/config.go | 2 ++
cmd/frostfs-ir/defaults.go | 1 +
cmd/frostfs-ir/main.go | 2 ++
cmd/frostfs-node/config.go | 3 +++
cmd/frostfs-node/config/logger/config.go | 8 ++++++++
cmd/frostfs-node/config/logger/config_test.go | 2 ++
config/example/ir.env | 1 +
config/example/ir.yaml | 1 +
config/example/node.env | 1 +
config/example/node.json | 3 ++-
config/example/node.yaml | 1 +
11 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go
index 4eaac845c..137e764ed 100644
--- a/cmd/frostfs-ir/config.go
+++ b/cmd/frostfs-ir/config.go
@@ -41,6 +41,8 @@ func reloadConfig() error {
if err != nil {
return err
}
+ logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
+
return logPrm.Reload()
}
diff --git a/cmd/frostfs-ir/defaults.go b/cmd/frostfs-ir/defaults.go
index e703301ae..899918d22 100644
--- a/cmd/frostfs-ir/defaults.go
+++ b/cmd/frostfs-ir/defaults.go
@@ -9,6 +9,7 @@ import (
func defaultConfiguration(cfg *viper.Viper) {
cfg.SetDefault("logger.level", "info")
cfg.SetDefault("logger.destination", "stdout")
+ cfg.SetDefault("logger.timestamp", false)
setPprofDefaults(cfg)
diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go
index 02936ae78..4bc5923a0 100644
--- a/cmd/frostfs-ir/main.go
+++ b/cmd/frostfs-ir/main.go
@@ -79,6 +79,8 @@ func main() {
)
exitErr(err)
logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()
+ logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
+
log, err = logger.NewLogger(logPrm)
exitErr(err)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index c0019d36a..ed3a65c25 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -102,6 +102,7 @@ type applicationConfiguration struct {
LoggerCfg struct {
level string
destination string
+ timestamp bool
}
EngineCfg struct {
@@ -220,6 +221,7 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.LoggerCfg.level = loggerconfig.Level(c)
a.LoggerCfg.destination = loggerconfig.Destination(c)
+ a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
// Storage Engine
@@ -1023,6 +1025,7 @@ func (c *cfg) loggerPrm() (*logger.Prm, error) {
// not expected since validation should be performed before
panic("incorrect log destination format: " + c.LoggerCfg.destination)
}
+ c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger, nil
}
diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go
index 378b9d793..ba9eeea2b 100644
--- a/cmd/frostfs-node/config/logger/config.go
+++ b/cmd/frostfs-node/config/logger/config.go
@@ -52,6 +52,14 @@ func Destination(c *config.Config) string {
return DestinationDefault
}
+// Timestamp returns the value of "timestamp" config parameter
+// from "logger" section.
+//
+// Returns false if the value isn't specified.
+func Timestamp(c *config.Config) bool {
+ return config.BoolSafe(c.Sub(subsection), "timestamp")
+}
+
// ToLokiConfig extracts loki config.
func ToLokiConfig(c *config.Config) loki.Config {
hostname, _ := os.Hostname()
diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go
index 3587a0ddb..ffe8ac693 100644
--- a/cmd/frostfs-node/config/logger/config_test.go
+++ b/cmd/frostfs-node/config/logger/config_test.go
@@ -13,6 +13,7 @@ func TestLoggerSection_Level(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
require.Equal(t, loggerconfig.LevelDefault, loggerconfig.Level(configtest.EmptyConfig()))
require.Equal(t, loggerconfig.DestinationDefault, loggerconfig.Destination(configtest.EmptyConfig()))
+ require.Equal(t, false, loggerconfig.Timestamp(configtest.EmptyConfig()))
})
const path = "../../../../config/example/node"
@@ -20,6 +21,7 @@ func TestLoggerSection_Level(t *testing.T) {
fileConfigTest := func(c *config.Config) {
require.Equal(t, "debug", loggerconfig.Level(c))
require.Equal(t, "journald", loggerconfig.Destination(c))
+ require.Equal(t, true, loggerconfig.Timestamp(c))
}
configtest.ForEachFileType(path, fileConfigTest)
diff --git a/config/example/ir.env b/config/example/ir.env
index 3f9530ab6..7234a4b32 100644
--- a/config/example/ir.env
+++ b/config/example/ir.env
@@ -1,4 +1,5 @@
FROSTFS_IR_LOGGER_LEVEL=info
+FROSTFS_IR_LOGGER_TIMESTAMP=true
FROSTFS_IR_WALLET_PATH=/path/to/wallet.json
FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX
diff --git a/config/example/ir.yaml b/config/example/ir.yaml
index 401328e72..4c64f088b 100644
--- a/config/example/ir.yaml
+++ b/config/example/ir.yaml
@@ -2,6 +2,7 @@
logger:
level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
+ timestamp: true
wallet:
path: /path/to/wallet.json # Path to NEP-6 NEO wallet file
diff --git a/config/example/node.env b/config/example/node.env
index 030a79934..6618a981a 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -1,5 +1,6 @@
FROSTFS_LOGGER_LEVEL=debug
FROSTFS_LOGGER_DESTINATION=journald
+FROSTFS_LOGGER_TIMESTAMP=true
FROSTFS_PPROF_ENABLED=true
FROSTFS_PPROF_ADDRESS=localhost:6060
diff --git a/config/example/node.json b/config/example/node.json
index 4e6d239fe..0d100ed80 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -1,7 +1,8 @@
{
"logger": {
"level": "debug",
- "destination": "journald"
+ "destination": "journald",
+ "timestamp": true
},
"pprof": {
"enabled": true,
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 5a8e6a2a4..86be35ba8 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -1,6 +1,7 @@
logger:
level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
destination: journald # logger destination: one of "stdout" (default), "journald"
+ timestamp: true
systemdnotify:
enabled: true
From b807d8c40066a90accf7dbdb5d8f31f6069f551c Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:20:55 +0300
Subject: [PATCH 118/705] [#1382] go.mod: Upgrade sdk-go and api-go versions
Signed-off-by: Dmitrii Stepanov
---
go.mod | 4 ++--
go.sum | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/go.mod b/go.mod
index 621d2e85d..78dce0131 100644
--- a/go.mod
+++ b/go.mod
@@ -4,12 +4,12 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
diff --git a/go.sum b/go.sum
index 4d21d9bca..dd0e31088 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb h1:p9ByDsw+H6p6LyYSx8LKFtAG/oPKQpDVMNfjPqdevTw=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e h1:740ABnOBYx4o6jxULHdSSnVW2fYIO35ohg+Uz59sxd0=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad h1:cJGK/bXSF/0KMr6zkIy06qekQhQRU7eYzh+lWdQ0U8w=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad/go.mod h1:I4GzeEnftO++N2JHQn9QR88eV0rjQkGqlUCNMnCz1CY=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69 h1:DJExzndXf6hztcQ8zHlBOJV/+FA6k2FpRGUcTDWqq2M=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
From 1e7f9909dade3ed905c07930c9a9f1bd9a8323b4 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:21:53 +0300
Subject: [PATCH 119/705] [#1382] policer: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
pkg/services/policer/check.go | 2 +-
pkg/services/policer/ec.go | 2 +-
pkg/services/policer/policer_test.go | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index bf67ec4d4..06282bd8d 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -110,7 +110,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
requirements.needLocalCopy = true
shortage--
- } else if nodes[i].IsMaintenance() {
+ } else if nodes[i].Status().IsMaintenance() {
shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
} else {
if status := checkedNodes.processStatus(nodes[i]); status.Processed() {
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index 61a65fc21..e822d1c09 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -106,7 +106,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
validPlacement: true,
}
}
- if requiredNode.IsMaintenance() {
+ if requiredNode.Status().IsMaintenance() {
// consider maintenance mode has object, but do not drop local copy
p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
return ecChunkProcessResult{}
diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go
index e353ea428..4e17e98a8 100644
--- a/pkg/services/policer/policer_test.go
+++ b/pkg/services/policer/policer_test.go
@@ -174,7 +174,7 @@ func TestProcessObject(t *testing.T) {
nodes[i].SetPublicKey([]byte{byte(i)})
}
for _, i := range ti.maintenanceNodes {
- nodes[i].SetMaintenance()
+ nodes[i].SetStatus(netmap.Maintenance)
}
var policy netmap.PlacementPolicy
From e5c8f7ff9f49b9e8d0f0a7ac4290aadeba356d6c Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:22:38 +0300
Subject: [PATCH 120/705] [#1382] controlSvc: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
pkg/services/control/ir/server/calls.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go
index 2447a8a74..642932c91 100644
--- a/pkg/services/control/ir/server/calls.go
+++ b/pkg/services/control/ir/server/calls.go
@@ -91,7 +91,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (
if len(nodeInfo.PublicKey()) == 0 {
return nil, status.Error(codes.NotFound, "no such node")
}
- if nodeInfo.IsOffline() {
+ if nodeInfo.Status().IsOffline() {
return nil, status.Error(codes.FailedPrecondition, "node is already offline")
}
From d4be2f20d4a240dd5a46f09b07b432667ca52f24 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:23:46 +0300
Subject: [PATCH 121/705] [#1382] morph: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
pkg/morph/client/netmap/netmap.go | 6 +++---
pkg/morph/client/netmap/netmap_test.go | 14 +++++++-------
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go
index 61bbf5f17..f7b5c3ba4 100644
--- a/pkg/morph/client/netmap/netmap.go
+++ b/pkg/morph/client/netmap/netmap.go
@@ -136,11 +136,11 @@ func decodeNodeInfo(dst *netmap.NodeInfo, itemNode stackitem.Item) error {
default:
return fmt.Errorf("unsupported state %v", node.State)
case netmapcontract.NodeStateOnline:
- dst.SetOnline()
+ dst.SetStatus(netmap.Online)
case netmapcontract.NodeStateOffline:
- dst.SetOffline()
+ dst.SetStatus(netmap.Offline)
case netmapcontract.NodeStateMaintenance:
- dst.SetMaintenance()
+ dst.SetStatus(netmap.Maintenance)
}
return nil
diff --git a/pkg/morph/client/netmap/netmap_test.go b/pkg/morph/client/netmap/netmap_test.go
index a8a306197..e686e271e 100644
--- a/pkg/morph/client/netmap/netmap_test.go
+++ b/pkg/morph/client/netmap/netmap_test.go
@@ -20,11 +20,11 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
switch i % 3 {
default:
- expected[i].SetOffline()
+ expected[i].SetStatus(netmap.Offline)
case int(netmapcontract.NodeStateOnline):
- expected[i].SetOnline()
+ expected[i].SetStatus(netmap.Online)
case int(netmapcontract.NodeStateMaintenance):
- expected[i].SetMaintenance()
+ expected[i].SetStatus(netmap.Maintenance)
}
expected[i].SetPublicKey(pub)
@@ -38,12 +38,12 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
var state int64
- switch {
- case expected[i].IsOnline():
+ switch expected[i].Status() {
+ case netmap.Online:
state = int64(netmapcontract.NodeStateOnline)
- case expected[i].IsOffline():
+ case netmap.Offline:
state = int64(netmapcontract.NodeStateOffline)
- case expected[i].IsMaintenance():
+ case netmap.Maintenance:
state = int64(netmapcontract.NodeStateMaintenance)
}
From a603d14d080e2485fdedee4b92306b1ce4aee2b0 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:24:53 +0300
Subject: [PATCH 122/705] [#1382] ir: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
pkg/innerring/processors/netmap/cleanup_table.go | 2 +-
pkg/innerring/processors/netmap/cleanup_table_test.go | 2 +-
pkg/innerring/processors/netmap/handlers_test.go | 2 +-
.../processors/netmap/nodevalidation/state/validator.go | 4 ++--
.../netmap/nodevalidation/state/validator_test.go | 8 ++++----
pkg/innerring/processors/netmap/process_peers.go | 2 +-
6 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/pkg/innerring/processors/netmap/cleanup_table.go b/pkg/innerring/processors/netmap/cleanup_table.go
index c18611569..abd5b089a 100644
--- a/pkg/innerring/processors/netmap/cleanup_table.go
+++ b/pkg/innerring/processors/netmap/cleanup_table.go
@@ -60,7 +60,7 @@ func (c *cleanupTable) update(snapshot netmap.NetMap, now uint64) {
}
access.binNodeInfo = binNodeInfo
- access.maintenance = nmNodes[i].IsMaintenance()
+ access.maintenance = nmNodes[i].Status().IsMaintenance()
newMap[keyString] = access
}
diff --git a/pkg/innerring/processors/netmap/cleanup_table_test.go b/pkg/innerring/processors/netmap/cleanup_table_test.go
index ae5620733..208bd5496 100644
--- a/pkg/innerring/processors/netmap/cleanup_table_test.go
+++ b/pkg/innerring/processors/netmap/cleanup_table_test.go
@@ -127,7 +127,7 @@ func TestCleanupTable(t *testing.T) {
t.Run("skip maintenance nodes", func(t *testing.T) {
cnt := 0
- infos[1].SetMaintenance()
+ infos[1].SetStatus(netmap.Maintenance)
key := netmap.StringifyPublicKey(infos[1])
c.update(networkMap, 5)
diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go
index b34abb78c..8875880bf 100644
--- a/pkg/innerring/processors/netmap/handlers_test.go
+++ b/pkg/innerring/processors/netmap/handlers_test.go
@@ -146,7 +146,7 @@ func TestAddPeer(t *testing.T) {
require.Nil(t, nc.notaryInvokes, "invalid notary invokes")
- node.SetOnline()
+ node.SetStatus(netmap.Online)
ev = netmapEvent.AddPeer{
NodeBytes: node.Marshal(),
Request: &payload.P2PNotaryRequest{
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
index 4094e50a5..e5165f618 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
@@ -56,11 +56,11 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting
//
// See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods.
func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error {
- if node.IsOnline() {
+ if node.Status().IsOnline() {
return nil
}
- if node.IsMaintenance() {
+ if node.Status().IsMaintenance() {
return x.netSettings.MaintenanceModeAllowed()
}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
index a557628f0..b81d7243b 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
@@ -41,22 +41,22 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
},
{
name: "ONLINE",
- preparer: (*netmap.NodeInfo).SetOnline,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Online) },
valid: true,
},
{
name: "OFFLINE",
- preparer: (*netmap.NodeInfo).SetOffline,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Offline) },
valid: false,
},
{
name: "MAINTENANCE/allowed",
- preparer: (*netmap.NodeInfo).SetMaintenance,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
valid: true,
},
{
name: "MAINTENANCE/disallowed",
- preparer: (*netmap.NodeInfo).SetMaintenance,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
valid: false,
validatorPreparer: func(v *state.NetMapCandidateValidator) {
var s testNetworkSettings
diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go
index 9e6e8c283..c8c7928a3 100644
--- a/pkg/innerring/processors/netmap/process_peers.go
+++ b/pkg/innerring/processors/netmap/process_peers.go
@@ -62,7 +62,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
// But there is no guarantee that code will be executed in the same order.
// That is why we need to perform `addPeerIR` only in case when node is online,
// because in scope of this method, contract set state `ONLINE` for the node.
- if updated && nodeInfo.IsOnline() {
+ if updated && nodeInfo.Status().IsOnline() {
np.log.Info(logs.NetmapApprovingNetworkMapCandidate,
zap.String("key", keyString))
From ac1eee091dfbb9193c407ac237cd53a26f4f83d9 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:27:10 +0300
Subject: [PATCH 123/705] [#1382] node: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 8 ++++++--
cmd/frostfs-node/netmap.go | 26 +++++++++++++++-----------
cmd/internal/common/netmap.go | 8 ++++----
3 files changed, 25 insertions(+), 17 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index ed3a65c25..63f410b89 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1182,7 +1182,9 @@ func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error {
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
func bootstrapOnline(c *cfg) error {
- return c.bootstrapWithState((*netmap.NodeInfo).SetOnline)
+ return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
+ ni.SetStatus(netmap.Online)
+ })
}
// bootstrap calls bootstrapWithState with:
@@ -1193,7 +1195,9 @@ func (c *cfg) bootstrap() error {
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
- return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance)
+ return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
+ ni.SetStatus(netmap.Maintenance)
+ })
}
c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index 8104b1dc1..c0b87492c 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -61,13 +61,15 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
if ni != nil {
s.nodeInfo.Store(*ni)
- switch {
- case ni.IsOnline():
+ switch ni.Status() {
+ case netmapSDK.Online:
ctrlNetSt = control.NetmapStatus_ONLINE
- case ni.IsOffline():
+ case netmapSDK.Offline:
ctrlNetSt = control.NetmapStatus_OFFLINE
- case ni.IsMaintenance():
+ case netmapSDK.Maintenance:
ctrlNetSt = control.NetmapStatus_MAINTENANCE
+ case netmapSDK.UnspecifiedState:
+ ctrlNetSt = control.NetmapStatus_STATUS_UNDEFINED
}
} else {
ctrlNetSt = control.NetmapStatus_OFFLINE
@@ -78,7 +80,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
// nil ni means that the node is not included
// in the netmap
- niOld.SetOffline()
+ niOld.SetStatus(netmapSDK.Offline)
s.nodeInfo.Store(niOld)
}
@@ -139,7 +141,7 @@ func initNetmapService(ctx context.Context, c *cfg) {
network.WriteToNodeInfo(c.localAddr, &c.cfgNodeInfo.localInfo)
c.cfgNodeInfo.localInfo.SetPublicKey(c.key.PublicKey().Bytes())
parseAttributes(c)
- c.cfgNodeInfo.localInfo.SetOffline()
+ c.cfgNodeInfo.localInfo.SetStatus(netmapSDK.Offline)
if c.cfgMorph.client == nil {
initMorphComponents(ctx, c)
@@ -252,7 +254,7 @@ func initNetmapState(c *cfg) {
zap.String("state", stateWord),
)
- if ni != nil && ni.IsMaintenance() {
+ if ni != nil && ni.Status().IsMaintenance() {
c.isMaintenance.Store(true)
}
@@ -263,13 +265,15 @@ func initNetmapState(c *cfg) {
func nodeState(ni *netmapSDK.NodeInfo) string {
if ni != nil {
- switch {
- case ni.IsOnline():
+ switch ni.Status() {
+ case netmapSDK.Online:
return "online"
- case ni.IsOffline():
+ case netmapSDK.Offline:
return "offline"
- case ni.IsMaintenance():
+ case netmapSDK.Maintenance:
return "maintenance"
+ case netmapSDK.UnspecifiedState:
+ return "undefined"
}
}
return "undefined"
diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go
index 79b03a726..f550552d2 100644
--- a/cmd/internal/common/netmap.go
+++ b/cmd/internal/common/netmap.go
@@ -14,14 +14,14 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo,
) {
var strState string
- switch {
+ switch node.Status() {
default:
strState = "STATE_UNSUPPORTED"
- case node.IsOnline():
+ case netmap.Online:
strState = "ONLINE"
- case node.IsOffline():
+ case netmap.Offline:
strState = "OFFLINE"
- case node.IsMaintenance():
+ case netmap.Maintenance:
strState = "MAINTENANCE"
}
From 3441fff05dd61647e7bd069db34f320e7e9efe9a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:27:39 +0300
Subject: [PATCH 124/705] [#1382] cli: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/netmap/nodeinfo.go | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go
index b6ec48f35..ae4bb329a 100644
--- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go
+++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go
@@ -49,14 +49,14 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) {
cmd.Println("key:", hex.EncodeToString(i.PublicKey()))
var stateWord string
- switch {
+ switch i.Status() {
default:
stateWord = ""
- case i.IsOnline():
+ case netmap.Online:
stateWord = "online"
- case i.IsOffline():
+ case netmap.Offline:
stateWord = "offline"
- case i.IsMaintenance():
+ case netmap.Maintenance:
stateWord = "maintenance"
}
From 61d5e140e051f92222fa9152d6bd807d505ca1e8 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Wed, 18 Sep 2024 12:13:15 +0300
Subject: [PATCH 125/705] [#1383] object: Add restrictions for `Patch` method
* `Patch` can't be applied for non-regular type object (tombstones,
locks etc.)
* Complex object parts can't be patched. So, if an object has EC/Split
header, it won't be patched.
Signed-off-by: Airat Arifullin
---
pkg/services/object/patch/streamer.go | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index 85c28cda0..73def8c7c 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -57,12 +57,31 @@ func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart {
return hs
}
+func isLinkObject(hdr *objectV2.HeaderWithSignature) bool {
+ split := hdr.GetHeader().GetSplit()
+ return len(split.GetChildren()) > 0 && split.GetParent() != nil
+}
+
+func isComplexObjectPart(hdr *objectV2.HeaderWithSignature) bool {
+ return hdr.GetHeader().GetEC() != nil || hdr.GetHeader().GetSplit() != nil
+}
+
func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
hdrWithSig, addr, err := s.readHeader(ctx, req)
if err != nil {
return err
}
+ if hdrWithSig.GetHeader().GetObjectType() != objectV2.TypeRegular {
+ return errors.New("non-regular object can't be patched")
+ }
+ if isLinkObject(hdrWithSig) {
+ return errors.New("linking object can't be patched")
+ }
+ if isComplexObjectPart(hdrWithSig) {
+ return errors.New("complex object parts can't be patched")
+ }
+
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return err
From 945b7c740b0deb4a2f16bb85f20efd8820762f53 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Wed, 18 Sep 2024 18:14:54 +0300
Subject: [PATCH 126/705] [#1372] adm/morph: Add delta flag to
'force-new-epoch'
Signed-off-by: Alexander Chuprov
---
cmd/frostfs-adm/internal/commonflags/flags.go | 1 +
cmd/frostfs-adm/internal/modules/morph/helper/netmap.go | 8 ++++++--
cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go | 3 ++-
cmd/frostfs-adm/internal/modules/morph/netmap/root.go | 2 ++
cmd/frostfs-adm/internal/modules/morph/node/remove.go | 2 +-
5 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go
index 81395edb0..b51d2e115 100644
--- a/cmd/frostfs-adm/internal/commonflags/flags.go
+++ b/cmd/frostfs-adm/internal/commonflags/flags.go
@@ -39,4 +39,5 @@ const (
CustomZoneFlag = "domain"
AlphabetSizeFlag = "size"
AllFlag = "all"
+ DeltaFlag = "delta"
)
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
index 7a778f8c3..fb8f03783 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
@@ -72,13 +72,17 @@ func InvalidConfigValueErr(key string) error {
return fmt.Errorf("invalid %s config value from netmap contract", key)
}
-func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160) error {
+func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160, countEpoch int64) error {
+ if countEpoch <= 0 {
+ return errors.New("number of epochs cannot be less than 1")
+ }
+
curr, err := unwrap.Int64(wCtx.ReadOnlyInvoker.Call(nmHash, "epoch"))
if err != nil {
return errors.New("can't fetch current epoch from the netmap contract")
}
- newEpoch := curr + 1
+ newEpoch := curr + countEpoch
wCtx.Command.Printf("Current epoch: %d, increase to %d.\n", curr, newEpoch)
// In NeoFS this is done via Notary contract. Here, however, we can form the
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
index df9a03fd1..5e4e9c725 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
@@ -4,6 +4,7 @@ import (
"fmt"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -30,7 +31,7 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
}
bw := io.NewBufBinWriter()
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil {
+ if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, viper.GetInt64(commonflags.DeltaFlag)); err != nil {
return err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
index 31fda860e..0288bcdc5 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
@@ -22,6 +22,7 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ _ = viper.BindPFlag(commonflags.DeltaFlag, cmd.Flags().Lookup(commonflags.DeltaFlag))
},
RunE: ForceNewEpochCmd,
}
@@ -35,6 +36,7 @@ func initForceNewEpochCmd() {
ForceNewEpoch.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
ForceNewEpoch.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
ForceNewEpoch.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
+ ForceNewEpoch.Flags().Int64(commonflags.DeltaFlag, 1, "Number of epochs to increase the current epoch")
}
func init() {
diff --git a/cmd/frostfs-adm/internal/modules/morph/node/remove.go b/cmd/frostfs-adm/internal/modules/morph/node/remove.go
index 0a19102ba..e47451e0c 100644
--- a/cmd/frostfs-adm/internal/modules/morph/node/remove.go
+++ b/cmd/frostfs-adm/internal/modules/morph/node/remove.go
@@ -53,7 +53,7 @@ func RemoveNodesCmd(cmd *cobra.Command, args []string) error {
int64(netmapcontract.NodeStateOffline), nodeKeys[i].Bytes())
}
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil {
+ if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, 1); err != nil {
return err
}
From 1361db91ee37d3da938dc5146cc3f15f9ee33517 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 16 Sep 2024 11:09:51 +0300
Subject: [PATCH 127/705] [#1301] adm/morph: Add flag -v to 'Tokens'
Signed-off-by: Alexander Chuprov
---
.../internal/modules/morph/nns/tokens.go | 44 ++++++++++++++++++-
1 file changed, 43 insertions(+), 1 deletion(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
index 6e8ffb40a..3c7136e9d 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
@@ -1,15 +1,25 @@
package nns
import (
+ "math/big"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
+ client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/spf13/cobra"
)
+const (
+ verboseDesc = "Include additional information about CNAME record."
+)
+
func initTokensCmd() {
Cmd.AddCommand(tokensCmd)
tokensCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
tokensCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ tokensCmd.Flags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, verboseDesc)
}
func listTokens(cmd *cobra.Command, _ []string) {
@@ -18,7 +28,39 @@ func listTokens(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "unable to get tokens: %w", err)
for toks, err := it.Next(10); err == nil && len(toks) > 0; toks, err = it.Next(10) {
for _, token := range toks {
- cmd.Println(string(token))
+ output := string(token)
+ if verbose, _ := cmd.Flags().GetBool(commonflags.Verbose); verbose {
+ cname, err := getCnameRecord(c, token)
+ commonCmd.ExitOnErr(cmd, "", err)
+ if cname != "" {
+ output += " (CNAME: " + cname + ")"
+ }
+ }
+ cmd.Println(output)
}
}
}
+
+func getCnameRecord(c *client.Contract, token []byte) (string, error) {
+ items, err := c.GetRecords(string(token), big.NewInt(int64(nns.CNAME)))
+
+ // GetRecords returns the error "not an array" if the domain does not contain records.
+ if err != nil && strings.Contains(err.Error(), "not an array") {
+ return "", nil
+ }
+
+ if err != nil {
+ return "", err
+ }
+
+ if len(items) == 0 {
+ return "", nil
+ }
+
+ record, err := items[0].TryBytes()
+ if err != nil {
+ return "", err
+ }
+
+ return string(record), nil
+}
From 5a53f9c4fd52243dd36c69e62d79f344342d4349 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Thu, 19 Sep 2024 14:19:16 +0300
Subject: [PATCH 128/705] [#1301] go.mod: Bump frostfs-contract
Signed-off-by: Alexander Chuprov
---
go.mod | 6 +++---
go.sum | 8 ++++----
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/go.mod b/go.mod
index 78dce0131..502761866 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e
- git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
+ git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
@@ -28,7 +28,7 @@ require (
github.com/mitchellh/go-homedir v1.1.0
github.com/mr-tron/base58 v1.2.0
github.com/multiformats/go-multiaddr v0.12.1
- github.com/nspcc-dev/neo-go v0.106.2
+ github.com/nspcc-dev/neo-go v0.106.3
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0
github.com/prometheus/client_golang v1.19.0
@@ -100,7 +100,7 @@ require (
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
- github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
+ github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec // indirect
github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
diff --git a/go.sum b/go.sum
index dd0e31088..85d9df443 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,8 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e h1:740ABnOBYx4o6jxULHdSSnVW2fYIO35ohg+Uz59sxd0=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
@@ -188,8 +188,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk=
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d h1:Vcb7YkZuUSSIC+WF/xV3UDfHbAxZgyT2zGleJP3Ig5k=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec h1:vDrbVXF2+2uP0RlkZmem3QYATcXCu9BzzGGCNsNcK7Q=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM=
github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
From 53a90634fc0a55be636a220b461be731f8a91b3f Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Thu, 19 Sep 2024 14:19:41 +0300
Subject: [PATCH 129/705] [#1301] adm/morph: Add 'delete' domains
Signed-off-by: Alexander Chuprov
---
.../morph/nns/{register.go => domains.go} | 20 +++++++++++++++++++
.../internal/modules/morph/nns/root.go | 10 ++++++++++
2 files changed, 30 insertions(+)
rename cmd/frostfs-adm/internal/modules/morph/nns/{register.go => domains.go} (73%)
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/register.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go
similarity index 73%
rename from cmd/frostfs-adm/internal/modules/morph/nns/register.go
rename to cmd/frostfs-adm/internal/modules/morph/nns/domains.go
index d05d9f171..3684db94a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/register.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go
@@ -42,3 +42,23 @@ func registerDomain(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "register domain error: %w", err)
cmd.Println("Domain registered successfully")
}
+
+func initDeleteCmd() {
+ Cmd.AddCommand(deleteCmd)
+ deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
+
+ _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag)
+}
+
+func deleteDomain(cmd *cobra.Command, _ []string) {
+ c, actor, _ := getRPCClient(cmd)
+
+ name, _ := cmd.Flags().GetString(nnsNameFlag)
+ h, vub, err := c.DeleteDomain(name)
+
+ _, err = actor.Wait(h, vub, err)
+ commonCmd.ExitOnErr(cmd, "delete domain error: %w", err)
+ cmd.Println("Domain deleted successfully")
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
index e528e4b7b..56774c292 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
@@ -42,6 +42,15 @@ var (
},
Run: registerDomain,
}
+ deleteCmd = &cobra.Command{
+ Use: "delete",
+ Short: "Delete a domain by name",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ },
+ Run: deleteDomain,
+ }
renewCmd = &cobra.Command{
Use: "renew",
Short: "Increases domain expiration date",
@@ -91,6 +100,7 @@ var (
func init() {
initTokensCmd()
initRegisterCmd()
+ initDeleteCmd()
initRenewCmd()
initUpdateCmd()
initAddRecordCmd()
From c290d079fd71ddc851cce8d06f496d27ceedc168 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Fri, 20 Sep 2024 10:53:02 +0300
Subject: [PATCH 130/705] [#1312] go.mod: Update sdk-go
Signed-off-by: Aleksey Savchuk
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 502761866..9817f8527 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
diff --git a/go.sum b/go.sum
index 85d9df443..3c6dd9a99 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69 h1:DJExzndXf6hztcQ8zHlBOJV/+FA6k2FpRGUcTDWqq2M=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98 h1:ijUci3thz0EwWkuRJDocW5D1RkVAJlt9xNG4CYepC90=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
From c34b8acedde282bbe81efccea772a923ee570a8f Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Fri, 20 Sep 2024 10:58:22 +0300
Subject: [PATCH 131/705] [#1312] Drop handling of system attributes with NeoFS
prefix
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-cli/modules/container/list_objects.go | 13 ++-----------
pkg/core/object/fmt.go | 2 +-
pkg/local_object_storage/metabase/put.go | 4 ----
pkg/local_object_storage/metabase/upgrade.go | 2 +-
pkg/services/object_manager/tombstone/checker.go | 2 +-
5 files changed, 5 insertions(+), 18 deletions(-)
diff --git a/cmd/frostfs-cli/modules/container/list_objects.go b/cmd/frostfs-cli/modules/container/list_objects.go
index ff2f8cf45..d5850359d 100644
--- a/cmd/frostfs-cli/modules/container/list_objects.go
+++ b/cmd/frostfs-cli/modules/container/list_objects.go
@@ -1,9 +1,6 @@
package container
import (
- "strings"
-
- v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
@@ -67,14 +64,8 @@ var listContainerObjectsCmd = &cobra.Command{
resHead, err := internalclient.HeadObject(cmd.Context(), prmHead)
if err == nil {
- attrs := resHead.Header().Attributes()
- for i := range attrs {
- attrKey := attrs[i].Key()
- if !strings.HasPrefix(attrKey, v2object.SysAttributePrefix) && !strings.HasPrefix(attrKey, v2object.SysAttributePrefixNeoFS) {
- // FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
- // Use dedicated method to skip system attributes.
- cmd.Printf(" %s: %s\n", attrKey, attrs[i].Value())
- }
+ for _, attr := range resHead.Header().UserAttributes() {
+ cmd.Printf(" %s: %s\n", attr.Key(), attr.Value())
}
} else {
cmd.Printf(" failed to read attributes: %v\n", err)
diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go
index 96f721806..317d62cb0 100644
--- a/pkg/core/object/fmt.go
+++ b/pkg/core/object/fmt.go
@@ -361,7 +361,7 @@ func (v *FormatValidator) checkIfExpired(ctx context.Context, obj *objectSDK.Obj
func expirationEpochAttribute(obj *objectSDK.Object) (uint64, error) {
for _, a := range obj.Attributes() {
- if a.Key() != objectV2.SysAttributeExpEpoch && a.Key() != objectV2.SysAttributeExpEpochNeoFS {
+ if a.Key() != objectV2.SysAttributeExpEpoch {
continue
}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index ff79a0387..087529895 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -387,10 +387,6 @@ func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) {
attributes = ech.ParentAttributes()
}
for _, attr := range attributes {
- if attr.Key() == objectV2.SysAttributeExpEpochNeoFS {
- expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
- return expEpoch, err == nil
- }
if attr.Key() == objectV2.SysAttributeExpEpoch {
expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
return expEpoch, err == nil
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index f677dcf8e..b5de430dc 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -252,7 +252,7 @@ func getNextObjectsWithExpirationEpochBatch(ctx context.Context, db *bbolt.DB, i
continue
}
attributeKey := string(attrKey[1+cidSize:])
- if attributeKey != objectV2.SysAttributeExpEpochNeoFS && attributeKey != objectV2.SysAttributeExpEpoch {
+ if attributeKey != objectV2.SysAttributeExpEpoch {
continue
}
var containerID cid.ID
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index c3c810001..48a08b693 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -74,7 +74,7 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
for _, atr := range ts.Attributes() {
- if atr.Key() == objectV2.SysAttributeExpEpoch || atr.Key() == objectV2.SysAttributeExpEpochNeoFS {
+ if atr.Key() == objectV2.SysAttributeExpEpoch {
epoch, err := strconv.ParseUint(atr.Value(), 10, 64)
if err != nil {
g.log.Warn(
From f71418b73cfb49306ec1a191621b954a75105b18 Mon Sep 17 00:00:00 2001
From: Vitaliy Potyarkin
Date: Fri, 20 Sep 2024 10:24:40 +0000
Subject: [PATCH 132/705] [#1386] frostfs-adm: Add info to error messages
These error messages bubble up to human users - adding more context helps
to find the cause of the issue faster.
Signed-off-by: Vitaliy Potyarkin
---
.../modules/morph/initialize/initialize_roles.go | 10 ++++++++--
.../morph/initialize/initialize_transfer.go | 15 ++++++++++++++-
2 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
index a6815ee13..05bc83a8b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
@@ -1,6 +1,8 @@
package initialize
import (
+ "fmt"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -29,10 +31,14 @@ func setNotaryAndAlphabetNodes(c *helper.InitializeContext) error {
callflag.States|callflag.AllowNotify, int64(noderoles.NeoFSAlphabet), pubs)
if err := c.SendCommitteeTx(w.Bytes(), false); err != nil {
- return err
+ return fmt.Errorf("send committee transaction: %w", err)
}
- return c.AwaitTx()
+ err := c.AwaitTx()
+ if err != nil {
+ err = fmt.Errorf("await committee transaction: %w", err)
+ }
+ return err
}
func setRolesFinished(c *helper.InitializeContext) (bool, error) {
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
index b7102fa13..d7b0ec86c 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
@@ -3,6 +3,7 @@ package initialize
import (
"fmt"
"math/big"
+ "strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@@ -144,5 +145,17 @@ func createNEP17MultiTransferTx(c helper.Client, acc *wallet.Account, recipients
if err != nil {
return nil, fmt.Errorf("can't create actor: %w", err)
}
- return act.MakeRun(w.Bytes())
+ tx, err := act.MakeRun(w.Bytes())
+ if err != nil {
+ sum := make(map[util.Uint160]int64)
+ for _, recipient := range recipients {
+ sum[recipient.Token] += recipient.Amount
+ }
+ detail := make([]string, 0, len(sum))
+ for _, value := range sum {
+ detail = append(detail, fmt.Sprintf("amount=%v", value))
+ }
+ err = fmt.Errorf("transfer failed: from=%s(%s) %s: %w", acc.Label, acc.Address, strings.Join(detail, " "), err)
+ }
+ return tx, err
}
From 0b87be804a63760fc7e43a51cc1b00c5aeedbb34 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 17 Sep 2024 11:24:48 +0300
Subject: [PATCH 133/705] [#1381] engine: Fix tests
Drop `Eventually` calls that are no longer required.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/shard/delete_test.go | 14 ++++-------
pkg/local_object_storage/shard/get_test.go | 23 ++++---------------
pkg/local_object_storage/shard/head_test.go | 19 ++-------------
pkg/local_object_storage/shard/inhume_test.go | 2 +-
4 files changed, 11 insertions(+), 47 deletions(-)
diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go
index 9f205fa5d..574250a93 100644
--- a/pkg/local_object_storage/shard/delete_test.go
+++ b/pkg/local_object_storage/shard/delete_test.go
@@ -3,7 +3,6 @@ package shard
import (
"context"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
@@ -58,19 +57,14 @@ func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- _, err = testGet(t, sh, getPrm, hasWriteCache)
+ _, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err)
if hasWriteCache {
- sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false})
- require.Eventually(t, func() bool {
- _, err = sh.Delete(context.Background(), delPrm)
- return err == nil
- }, 30*time.Second, 10*time.Millisecond)
- } else {
- _, err = sh.Delete(context.Background(), delPrm)
- require.NoError(t, err)
+ require.NoError(t, sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false}))
}
+ _, err = sh.Delete(context.Background(), delPrm)
+ require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err))
diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go
index 8a7c6972d..d0eecf74e 100644
--- a/pkg/local_object_storage/shard/get_test.go
+++ b/pkg/local_object_storage/shard/get_test.go
@@ -5,11 +5,9 @@ import (
"context"
"errors"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -49,7 +47,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(obj))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
@@ -67,7 +65,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(obj))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
@@ -95,13 +93,13 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(child))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.True(t, binaryEqual(child, res.Object()))
getPrm.SetAddress(object.AddressOf(parent))
- _, err = testGet(t, sh, getPrm, hasWriteCache)
+ _, err = sh.Get(context.Background(), getPrm)
var si *objectSDK.SplitInfoError
require.True(t, errors.As(err, &si))
@@ -115,19 +113,6 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
})
}
-func testGet(t *testing.T, sh *Shard, getPrm GetPrm, hasWriteCache bool) (GetRes, error) {
- res, err := sh.Get(context.Background(), getPrm)
- if hasWriteCache {
- require.Eventually(t, func() bool {
- if client.IsErrObjectNotFound(err) {
- res, err = sh.Get(context.Background(), getPrm)
- }
- return !client.IsErrObjectNotFound(err)
- }, time.Second, time.Millisecond*100)
- }
- return res, err
-}
-
// binary equal is used when object contains empty lists in the structure and
// requre.Equal fails on comparing and []{} lists.
func binaryEqual(a, b *objectSDK.Object) bool {
diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go
index 1f4631993..c65bbb1e3 100644
--- a/pkg/local_object_storage/shard/head_test.go
+++ b/pkg/local_object_storage/shard/head_test.go
@@ -4,11 +4,9 @@ import (
"context"
"errors"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
@@ -46,7 +44,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
headPrm.SetAddress(object.AddressOf(obj))
- res, err := testHead(t, sh, headPrm, hasWriteCache)
+ res, err := sh.Head(context.Background(), headPrm)
require.NoError(t, err)
require.Equal(t, obj.CutPayload(), res.Object())
})
@@ -74,7 +72,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
var siErr *objectSDK.SplitInfoError
- _, err = testHead(t, sh, headPrm, hasWriteCache)
+ _, err = sh.Head(context.Background(), headPrm)
require.True(t, errors.As(err, &siErr))
headPrm.SetAddress(object.AddressOf(parent))
@@ -85,16 +83,3 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
require.Equal(t, parent.CutPayload(), head.Object())
})
}
-
-func testHead(t *testing.T, sh *Shard, headPrm HeadPrm, hasWriteCache bool) (HeadRes, error) {
- res, err := sh.Head(context.Background(), headPrm)
- if hasWriteCache {
- require.Eventually(t, func() bool {
- if client.IsErrObjectNotFound(err) {
- res, err = sh.Head(context.Background(), headPrm)
- }
- return !client.IsErrObjectNotFound(err)
- }, time.Second, time.Millisecond*100)
- }
- return res, err
-}
diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go
index 82754568f..1353d5d94 100644
--- a/pkg/local_object_storage/shard/inhume_test.go
+++ b/pkg/local_object_storage/shard/inhume_test.go
@@ -48,7 +48,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) {
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- _, err = testGet(t, sh, getPrm, hasWriteCache)
+ _, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err)
_, err = sh.Inhume(context.Background(), inhPrm)
From d4493a6d082011cfa24df68b41d92c7b905fda27 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 15:07:10 +0300
Subject: [PATCH 134/705] [#1390] getSvc: Fix Head EC1.1
If a local EC chunk is found but the remote node is offline, the `HEAD --raw` request
returns "object not found".
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/container.go | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index d22b14192..034768c81 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -26,8 +26,10 @@ func (r *request) executeOnContainer(ctx context.Context) {
return
}
+ localStatus := r.status
+
for {
- if r.processCurrentEpoch(ctx) {
+ if r.processCurrentEpoch(ctx, localStatus) {
break
}
@@ -43,7 +45,7 @@ func (r *request) executeOnContainer(ctx context.Context) {
}
}
-func (r *request) processCurrentEpoch(ctx context.Context) bool {
+func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool {
r.log.Debug(logs.ProcessEpoch,
zap.Uint64("number", r.curProcEpoch),
)
@@ -56,7 +58,11 @@ func (r *request) processCurrentEpoch(ctx context.Context) bool {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- r.status = statusUndefined
+ if localStatus == statusEC { // possible only for raw == true and local == false
+ r.status = statusEC
+ } else {
+ r.status = statusUndefined
+ }
for {
addrs := traverser.Next()
From 3bb65ba820274a2014b3abfe6e11a98047b2059f Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Tue, 24 Sep 2024 11:46:15 +0300
Subject: [PATCH 135/705] [#1392] object: Fix target initialization within put
streamer
* Remove `relay` field from put streamer as it's no longer used;
* Fix initialization of `Relay` object writer parameter.
Signed-off-by: Airat Arifullin
---
pkg/services/object/put/streamer.go | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index f3803d433..d08e7fafa 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
@@ -15,8 +14,6 @@ type Streamer struct {
*objectwriter.Config
target transformer.ChunkedObjectWriter
-
- relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
}
var errNotInit = errors.New("stream not initialized")
@@ -35,7 +32,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
Header: prm.hdr,
Container: prm.cnr,
TraverseOpts: prm.traverseOpts,
- Relay: p.relay,
+ Relay: prm.relay,
}
var err error
From 839dead226534887ffbb21b07327724e42cd2135 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 16 Sep 2024 12:38:45 +0300
Subject: [PATCH 136/705] [#1297] getSvc: Return AccessDenied instead of
ObjectNotFound
Do not replace the access denied error if it was received earlier.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/remote.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index 163767c43..f2639f8e6 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -41,7 +41,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
r.status = statusUndefined
if errors.As(err, &errAccessDenied) {
r.err = err
- } else {
+ } else if r.err == nil || !errors.As(r.err, &errAccessDenied) {
r.err = new(apistatus.ObjectNotFound)
}
}
From bdf386366c4e268d9f151c38c4eb5c837a49ab25 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 16 Sep 2024 12:40:12 +0300
Subject: [PATCH 137/705] [#1297] dev: Bump neo-go version
Signed-off-by: Dmitrii Stepanov
---
dev/docker-compose.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml
index 9d026797c..be1956e65 100644
--- a/dev/docker-compose.yml
+++ b/dev/docker-compose.yml
@@ -3,7 +3,7 @@
version: "2.4"
services:
neo-go:
- image: nspccdev/neo-go:0.105.0
+ image: nspccdev/neo-go:0.106.0
container_name: neo-go
command: ["node", "--config-path", "/config", "--privnet", "--debug"]
stop_signal: SIGKILL
From 34e6a309c6b1cdd4e277f76b63a6b5d01b094115 Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Wed, 18 Sep 2024 12:15:32 +0300
Subject: [PATCH 138/705] [#1356] engine: Evacuate object from shards
concurrently
Signed-off-by: Anton Nikiforov
---
cmd/frostfs-cli/modules/control/evacuation.go | 15 +-
pkg/local_object_storage/engine/evacuate.go | 270 +++++++++++-------
.../engine/evacuate_test.go | 41 ++-
pkg/local_object_storage/metabase/list.go | 167 +++++++++++
pkg/local_object_storage/shard/list.go | 65 +++++
pkg/services/control/server/evacuate_async.go | 14 +-
pkg/services/control/service.proto | 4 +
pkg/services/control/service_frostfs.pb.go | 68 ++++-
8 files changed, 533 insertions(+), 111 deletions(-)
diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go
index 6fa5ed75c..04a67e5b5 100644
--- a/cmd/frostfs-cli/modules/control/evacuation.go
+++ b/cmd/frostfs-cli/modules/control/evacuation.go
@@ -21,6 +21,9 @@ const (
noProgressFlag = "no-progress"
scopeFlag = "scope"
+ containerWorkerCountFlag = "container-worker-count"
+ objectWorkerCountFlag = "object-worker-count"
+
scopeAll = "all"
scopeObjects = "objects"
scopeTrees = "trees"
@@ -64,12 +67,16 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
+ containerWorkerCount, _ := cmd.Flags().GetUint32(containerWorkerCountFlag)
+ objectWorkerCount, _ := cmd.Flags().GetUint32(objectWorkerCountFlag)
req := &control.StartShardEvacuationRequest{
Body: &control.StartShardEvacuationRequest_Body{
- Shard_ID: getShardIDList(cmd),
- IgnoreErrors: ignoreErrors,
- Scope: getEvacuationScope(cmd),
+ Shard_ID: getShardIDList(cmd),
+ IgnoreErrors: ignoreErrors,
+ Scope: getEvacuationScope(cmd),
+ ContainerWorkerCount: containerWorkerCount,
+ ObjectWorkerCount: objectWorkerCount,
},
}
@@ -371,6 +378,8 @@ func initControlStartEvacuationShardCmd() {
flags.String(scopeFlag, scopeAll, fmt.Sprintf("Evacuation scope; possible values: %s, %s, %s", scopeTrees, scopeObjects, scopeAll))
flags.Bool(awaitFlag, false, "Block execution until evacuation is completed")
flags.Bool(noProgressFlag, false, fmt.Sprintf("Print progress if %s provided", awaitFlag))
+ flags.Uint32(containerWorkerCountFlag, 0, "Count of concurrent container evacuation workers")
+ flags.Uint32(objectWorkerCountFlag, 0, "Count of concurrent object evacuation workers")
startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 7bef6edfb..3db556a8f 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -24,6 +23,16 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ // containerWorkerCountDefault is a default value of the count of
+ // concurrent container evacuation workers.
+ containerWorkerCountDefault = 10
+ // objectWorkerCountDefault is a default value of the count of
+ // concurrent object evacuation workers.
+ objectWorkerCountDefault = 10
)
var (
@@ -79,6 +88,9 @@ type EvacuateShardPrm struct {
IgnoreErrors bool
Async bool
Scope EvacuateScope
+
+ ContainerWorkerCount uint32
+ ObjectWorkerCount uint32
}
// EvacuateShardRes represents result of the EvacuateShard operation.
@@ -189,8 +201,6 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes {
return res
}
-const defaultEvacuateBatchSize = 100
-
type pooledShard struct {
hashedShard
pool util.WorkerPool
@@ -242,8 +252,16 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev
return nil, err
}
+ var mtx sync.RWMutex
+ copyShards := func() []pooledShard {
+ mtx.RLock()
+ defer mtx.RUnlock()
+ t := make([]pooledShard, len(shards))
+ copy(t, shards)
+ return t
+ }
eg.Go(func() error {
- return e.evacuateShards(egCtx, shardIDs, prm, res, shards, shardsToEvacuate)
+ return e.evacuateShards(egCtx, shardIDs, prm, res, copyShards, shardsToEvacuate)
})
if prm.Async {
@@ -261,7 +279,7 @@ func ctxOrBackground(ctx context.Context, background bool) context.Context {
}
func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
var err error
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards",
@@ -287,13 +305,39 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
return err
}
- for _, shardID := range shardIDs {
- if err = e.evacuateShard(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
- e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
- return err
+ ctx, cancel, egShard, egContainer, egObject := e.createErrorGroupsForEvacuation(ctx, prm)
+ continueLoop := true
+ for i := 0; continueLoop && i < len(shardIDs); i++ {
+ select {
+ case <-ctx.Done():
+ continueLoop = false
+ default:
+ egShard.Go(func() error {
+ err := e.evacuateShard(ctx, cancel, shardIDs[i], prm, res, shards, shardsToEvacuate, egContainer, egObject)
+ if err != nil {
+ cancel(err)
+ }
+ return err
+ })
}
}
+ err = egShard.Wait()
+ if err != nil {
+ err = fmt.Errorf("shard error: %w", err)
+ }
+ errContainer := egContainer.Wait()
+ errObject := egObject.Wait()
+ if errContainer != nil {
+ err = errors.Join(err, fmt.Errorf("container error: %w", errContainer))
+ }
+ if errObject != nil {
+ err = errors.Join(err, fmt.Errorf("object error: %w", errObject))
+ }
+ if err != nil {
+ e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
+ return err
+ }
e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation,
zap.Strings("shard_ids", shardIDs),
@@ -309,6 +353,27 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
return nil
}
+func (e *StorageEngine) createErrorGroupsForEvacuation(ctx context.Context, prm EvacuateShardPrm) (
+ context.Context, context.CancelCauseFunc, *errgroup.Group, *errgroup.Group, *errgroup.Group,
+) {
+ operationCtx, cancel := context.WithCancelCause(ctx)
+ egObject, _ := errgroup.WithContext(operationCtx)
+ objectWorkerCount := prm.ObjectWorkerCount
+ if objectWorkerCount == 0 {
+ objectWorkerCount = objectWorkerCountDefault
+ }
+ egObject.SetLimit(int(objectWorkerCount))
+ egContainer, _ := errgroup.WithContext(operationCtx)
+ containerWorkerCount := prm.ContainerWorkerCount
+ if containerWorkerCount == 0 {
+ containerWorkerCount = containerWorkerCountDefault
+ }
+ egContainer.SetLimit(int(containerWorkerCount))
+ egShard, _ := errgroup.WithContext(operationCtx)
+
+ return operationCtx, cancel, egShard, egContainer, egObject
+}
+
func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getTotals")
defer span.End()
@@ -335,8 +400,9 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha
return nil
}
-func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ egContainer *errgroup.Group, egObject *errgroup.Group,
) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard",
trace.WithAttributes(
@@ -345,11 +411,10 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E
defer span.End()
if prm.Scope.WithObjects() {
- if err := e.evacuateShardObjects(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
+ if err := e.evacuateShardObjects(ctx, cancel, shardID, prm, res, shards, shardsToEvacuate, egContainer, egObject); err != nil {
return err
}
}
-
if prm.Scope.WithTrees() && shardsToEvacuate[shardID].PiloramaEnabled() {
if err := e.evacuateShardTrees(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
return err
@@ -359,44 +424,60 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E
return nil
}
-func (e *StorageEngine) evacuateShardObjects(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ egContainer *errgroup.Group, egObject *errgroup.Group,
) error {
- var listPrm shard.ListWithCursorPrm
- listPrm.WithCount(defaultEvacuateBatchSize)
-
sh := shardsToEvacuate[shardID]
- sh.SetEvacuationInProgress(true)
-
- var c *meta.Cursor
- for {
- listPrm.WithCursor(c)
-
- // TODO (@fyrchik): #1731 this approach doesn't work in degraded modes
- // because ListWithCursor works only with the metabase.
- listRes, err := sh.ListWithCursor(ctx, listPrm)
- if err != nil {
- if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) {
- break
+ var cntPrm shard.IterateOverContainersPrm
+ cntPrm.Handler = func(ctx context.Context, name []byte, _ cid.ID) error {
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
+ egContainer.Go(func() error {
+ var objPrm shard.IterateOverObjectsInContainerPrm
+ objPrm.BucketName = name
+ objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error {
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
+ egObject.Go(func() error {
+ err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate)
+ if err != nil {
+ cancel(err)
+ }
+ return err
+ })
+ return nil
+ }
+ err := sh.IterateOverObjectsInContainer(ctx, objPrm)
+ if err != nil {
+ cancel(err)
}
- e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return err
- }
-
- if err = e.evacuateObjects(ctx, sh, listRes.AddressList(), prm, res, shards, shardsToEvacuate); err != nil {
- return err
- }
-
- c = listRes.Cursor()
+ })
+ return nil
}
- return nil
+
+ sh.SetEvacuationInProgress(true)
+ err := sh.IterateOverContainers(ctx, cntPrm)
+ if err != nil {
+ cancel(err)
+ e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ }
+ return err
}
func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
sh := shardsToEvacuate[shardID]
+ shards := getShards()
var listPrm pilorama.TreeListTreesPrm
first := true
@@ -637,68 +718,65 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm)
return shards, nil
}
-func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, toEvacuate []object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
+ getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects",
- trace.WithAttributes(
- attribute.Int("objects_count", len(toEvacuate)),
- ))
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects")
defer span.End()
- for i := range toEvacuate {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- addr := toEvacuate[i].Address
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
- var getPrm shard.GetPrm
- getPrm.SetAddress(addr)
- getPrm.SkipEvacCheck(true)
+ shards := getShards()
+ addr := objInfo.Address
- getRes, err := sh.Get(ctx, getPrm)
- if err != nil {
- if prm.IgnoreErrors {
- res.objFailed.Add(1)
- continue
- }
- e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- return err
- }
+ var getPrm shard.GetPrm
+ getPrm.SetAddress(addr)
+ getPrm.SkipEvacCheck(true)
- evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), sh, shards, shardsToEvacuate, res)
- if err != nil {
- return err
- }
-
- if evacuatedLocal {
- continue
- }
-
- if prm.ObjectsHandler == nil {
- // Do not check ignoreErrors flag here because
- // ignoring errors on put make this command kinda useless.
- return fmt.Errorf("%w: %s", errPutShard, toEvacuate[i])
- }
-
- moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
- if err != nil {
- e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- return err
- }
- if moved {
- res.objEvacuated.Add(1)
- } else if prm.IgnoreErrors {
+ getRes, err := shardsToEvacuate[shardID].Get(ctx, getPrm)
+ if err != nil {
+ if prm.IgnoreErrors {
res.objFailed.Add(1)
- e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- } else {
- return fmt.Errorf("object %s was not replicated", addr)
+ return nil
}
+ e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return err
+ }
+
+ evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res)
+ if err != nil {
+ return err
+ }
+
+ if evacuatedLocal {
+ return nil
+ }
+
+ if prm.ObjectsHandler == nil {
+ // Do not check ignoreErrors flag here because
+ // ignoring errors on put make this command kinda useless.
+ return fmt.Errorf("%w: %s", errPutShard, objInfo)
+ }
+
+ moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
+ if err != nil {
+ e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return err
+ }
+ if moved {
+ res.objEvacuated.Add(1)
+ } else if prm.IgnoreErrors {
+ res.objFailed.Add(1)
+ e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ } else {
+ return fmt.Errorf("object %s was not replicated", addr)
}
return nil
}
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index 28529fab9..f72333399 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -6,6 +6,8 @@ import (
"fmt"
"path/filepath"
"strconv"
+ "sync"
+ "sync/atomic"
"testing"
"time"
@@ -174,13 +176,13 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
errReplication := errors.New("handler error")
acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) {
- var n uint64
+ var n atomic.Uint64
return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
- if n == max {
+ if n.Load() == max {
return false, errReplication
}
- n++
+ n.Add(1)
for i := range objects {
if addr == objectCore.AddressOf(objects[i]) {
require.Equal(t, objects[i], obj)
@@ -314,6 +316,36 @@ func TestEvacuateCancellation(t *testing.T) {
require.Equal(t, uint64(0), res.ObjectsEvacuated())
}
+func TestEvacuateCancellationByError(t *testing.T) {
+ t.Parallel()
+ e, ids, _ := newEngineEvacuate(t, 2, 10)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[1:2]
+ var once atomic.Bool
+ prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
+ var err error
+ flag := true
+ if once.CompareAndSwap(false, true) {
+ err = errors.New("test error")
+ flag = false
+ }
+ return flag, err
+ }
+ prm.Scope = EvacuateScopeObjects
+ prm.ObjectWorkerCount = 2
+ prm.ContainerWorkerCount = 2
+
+ _, err := e.Evacuate(context.Background(), prm)
+ require.ErrorContains(t, err, "test error")
+}
+
func TestEvacuateSingleProcess(t *testing.T) {
e, ids, _ := newEngineEvacuate(t, 2, 3)
defer func() {
@@ -531,6 +563,7 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
+ mutex := sync.Mutex{}
evacuatedTreeOps := make(map[string][]*pilorama.Move)
var prm EvacuateShardPrm
prm.ShardID = ids
@@ -545,7 +578,9 @@ func TestEvacuateTreesRemote(t *testing.T) {
if op.Time == 0 {
return true, "", nil
}
+ mutex.Lock()
evacuatedTreeOps[key] = append(evacuatedTreeOps[key], &op)
+ mutex.Unlock()
height = op.Time + 1
}
}
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index b4326a92c..5943be7f4 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -1,6 +1,7 @@
package meta
import (
+ "bytes"
"context"
"time"
@@ -61,6 +62,20 @@ func (l ListRes) Cursor() *Cursor {
return l.cursor
}
+// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
+type IterateOverContainersPrm struct {
+ // Handler function executed upon containers in db.
+ Handler func(context.Context, []byte, cid.ID) error
+}
+
+// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
+type IterateOverObjectsInContainerPrm struct {
+ // BucketName container's bucket name.
+ BucketName []byte
+ // Handler function executed upon objects in db.
+ Handler func(context.Context, *objectcore.Info) error
+}
+
// ListWithCursor lists physical objects available in metabase starting from
// cursor. Includes objects of all types. Does not include inhumed objects.
// Use cursor value from response for consecutive requests.
@@ -259,3 +274,155 @@ func parseContainerIDWithPrefix(containerID *cid.ID, name []byte) ([]byte, byte)
return rawID, name[0]
}
+
+// IterateOverContainers lists physical containers available in metabase starting from first.
+func (db *DB) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateOverContainers", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverContainers",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ return db.iterateOverContainers(ctx, tx, prm)
+ })
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm IterateOverContainersPrm) error {
+ var containerID cid.ID
+ for _, prefix := range [][]byte{{byte(primaryPrefix)}, {byte(lockersPrefix)}, {byte(tombstonePrefix)}} {
+ c := tx.Cursor()
+ for name, _ := c.Seek(prefix); name != nil && bytes.HasPrefix(name, prefix); name, _ = c.Next() {
+ cidRaw, _ := parseContainerIDWithPrefix(&containerID, name)
+ if cidRaw == nil {
+ continue
+ }
+
+ bktName := make([]byte, len(name))
+ copy(bktName, name)
+ var cnt cid.ID
+ copy(cnt[:], containerID[:])
+ err := prm.Handler(ctx, bktName, cnt)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// IterateOverObjectsInContainer iterates over physical objects available in metabase starting from first.
+func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateOverObjectsInContainer", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverObjectsInContainer",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ var containerID cid.ID
+ cidRaw, prefix := parseContainerIDWithPrefix(&containerID, prm.BucketName)
+ if cidRaw == nil {
+ return nil
+ }
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ return db.iterateOverObjectsInContainer(ctx, tx, cidRaw, prefix, containerID, prm)
+ })
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, cidRaw []byte, prefix byte,
+ containerID cid.ID, prm IterateOverObjectsInContainerPrm,
+) error {
+ bkt := tx.Bucket(prm.BucketName)
+ if bkt == nil {
+ return nil
+ }
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
+ c := bkt.Cursor()
+ k, v := c.First()
+
+ var objType objectSDK.Type
+
+ switch prefix {
+ case primaryPrefix:
+ objType = objectSDK.TypeRegular
+ case lockersPrefix:
+ objType = objectSDK.TypeLock
+ case tombstonePrefix:
+ objType = objectSDK.TypeTombstone
+ default:
+ return nil
+ }
+
+ for ; k != nil; k, v = c.Next() {
+ var obj oid.ID
+ if err := obj.Decode(k); err != nil {
+ break
+ }
+
+ if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
+ continue
+ }
+
+ var isLinkingObj bool
+ var ecInfo *objectcore.ECInfo
+ if objType == objectSDK.TypeRegular {
+ var o objectSDK.Object
+ if err := o.Unmarshal(v); err != nil {
+ return err
+ }
+ isLinkingObj = isLinkObject(&o)
+ ecHeader := o.ECHeader()
+ if ecHeader != nil {
+ ecInfo = &objectcore.ECInfo{
+ ParentID: ecHeader.Parent(),
+ Index: ecHeader.Index(),
+ Total: ecHeader.Total(),
+ }
+ }
+ }
+
+ var a oid.Address
+ a.SetContainer(containerID)
+ a.SetObject(obj)
+ objInfo := objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}
+ err := prm.Handler(ctx, &objInfo)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index 08ea81a0c..9f56ec750 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -34,6 +34,20 @@ func (r ListContainersRes) Containers() []cid.ID {
return r.containers
}
+// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
+type IterateOverContainersPrm struct {
+ // Handler function executed upon containers in db.
+ Handler func(context.Context, []byte, cid.ID) error
+}
+
+// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
+type IterateOverObjectsInContainerPrm struct {
+ // BucketName container's bucket name.
+ BucketName []byte
+ // Handler function executed upon objects in db.
+ Handler func(context.Context, *objectcore.Info) error
+}
+
// ListWithCursorPrm contains parameters for ListWithCursor operation.
type ListWithCursorPrm struct {
count uint32
@@ -164,3 +178,54 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
cursor: res.Cursor(),
}, nil
}
+
+// IterateOverContainers lists physical containers present in the shard.
+func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverContainers",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ var metaPrm meta.IterateOverContainersPrm
+ metaPrm.Handler = prm.Handler
+ err := s.metaBase.IterateOverContainers(ctx, metaPrm)
+ if err != nil {
+ return fmt.Errorf("could not iterate over containers: %w", err)
+ }
+
+ return nil
+}
+
+// IterateOverObjectsInContainer lists physical objects present in the shard for the provided container's bucket name.
+func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverObjectsInContainer",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ var metaPrm meta.IterateOverObjectsInContainerPrm
+ metaPrm.BucketName = prm.BucketName
+ metaPrm.Handler = prm.Handler
+ err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
+ if err != nil {
+ return fmt.Errorf("could not iterate over objects: %w", err)
+ }
+
+ return nil
+}
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
index aacebe9e3..bdc6f7c38 100644
--- a/pkg/services/control/server/evacuate_async.go
+++ b/pkg/services/control/server/evacuate_async.go
@@ -23,12 +23,14 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha
}
prm := engine.EvacuateShardPrm{
- ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
- IgnoreErrors: req.GetBody().GetIgnoreErrors(),
- ObjectsHandler: s.replicateObject,
- TreeHandler: s.replicateTree,
- Async: true,
- Scope: engine.EvacuateScope(req.GetBody().GetScope()),
+ ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
+ IgnoreErrors: req.GetBody().GetIgnoreErrors(),
+ ObjectsHandler: s.replicateObject,
+ TreeHandler: s.replicateTree,
+ Async: true,
+ Scope: engine.EvacuateScope(req.GetBody().GetScope()),
+ ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(),
+ ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(),
}
_, err = s.s.Evacuate(ctx, prm)
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 04994328a..88a06de22 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -394,6 +394,10 @@ message StartShardEvacuationRequest {
bool ignore_errors = 2;
// Evacuation scope.
uint32 scope = 3;
+ // Count of concurrent container evacuation workers.
+ uint32 container_worker_count = 4;
+ // Count of concurrent object evacuation workers.
+ uint32 object_worker_count = 5;
}
Body body = 1;
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 019cac290..e92a8acd1 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -6511,9 +6511,11 @@ func (x *StartShardEvacuationRequest_Body_Scope) FromString(s string) bool {
}
type StartShardEvacuationRequest_Body struct {
- Shard_ID [][]byte `json:"shardID"`
- IgnoreErrors bool `json:"ignoreErrors"`
- Scope uint32 `json:"scope"`
+ Shard_ID [][]byte `json:"shardID"`
+ IgnoreErrors bool `json:"ignoreErrors"`
+ Scope uint32 `json:"scope"`
+ ContainerWorkerCount uint32 `json:"containerWorkerCount"`
+ ObjectWorkerCount uint32 `json:"objectWorkerCount"`
}
var (
@@ -6533,6 +6535,8 @@ func (x *StartShardEvacuationRequest_Body) StableSize() (size int) {
size += proto.RepeatedBytesSize(1, x.Shard_ID)
size += proto.BoolSize(2, x.IgnoreErrors)
size += proto.UInt32Size(3, x.Scope)
+ size += proto.UInt32Size(4, x.ContainerWorkerCount)
+ size += proto.UInt32Size(5, x.ObjectWorkerCount)
return size
}
@@ -6558,6 +6562,12 @@ func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMar
if x.Scope != 0 {
mm.AppendUint32(3, x.Scope)
}
+ if x.ContainerWorkerCount != 0 {
+ mm.AppendUint32(4, x.ContainerWorkerCount)
+ }
+ if x.ObjectWorkerCount != 0 {
+ mm.AppendUint32(5, x.ObjectWorkerCount)
+ }
}
// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
@@ -6587,6 +6597,18 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err er
return fmt.Errorf("cannot unmarshal field %s", "Scope")
}
x.Scope = data
+ case 4: // ContainerWorkerCount
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerWorkerCount")
+ }
+ x.ContainerWorkerCount = data
+ case 5: // ObjectWorkerCount
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount")
+ }
+ x.ObjectWorkerCount = data
}
}
return nil
@@ -6618,6 +6640,24 @@ func (x *StartShardEvacuationRequest_Body) GetScope() uint32 {
func (x *StartShardEvacuationRequest_Body) SetScope(v uint32) {
x.Scope = v
}
+func (x *StartShardEvacuationRequest_Body) GetContainerWorkerCount() uint32 {
+ if x != nil {
+ return x.ContainerWorkerCount
+ }
+ return 0
+}
+func (x *StartShardEvacuationRequest_Body) SetContainerWorkerCount(v uint32) {
+ x.ContainerWorkerCount = v
+}
+func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 {
+ if x != nil {
+ return x.ObjectWorkerCount
+ }
+ return 0
+}
+func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) {
+ x.ObjectWorkerCount = v
+}
// MarshalJSON implements the json.Marshaler interface.
func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
@@ -6653,6 +6693,16 @@ func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer)
out.RawString(prefix)
out.Uint32(x.Scope)
}
+ {
+ const prefix string = ",\"containerWorkerCount\":"
+ out.RawString(prefix)
+ out.Uint32(x.ContainerWorkerCount)
+ }
+ {
+ const prefix string = ",\"objectWorkerCount\":"
+ out.RawString(prefix)
+ out.Uint32(x.ObjectWorkerCount)
+ }
out.RawByte('}')
}
@@ -6706,6 +6756,18 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
f = in.Uint32()
x.Scope = f
}
+ case "containerWorkerCount":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.ContainerWorkerCount = f
+ }
+ case "objectWorkerCount":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.ObjectWorkerCount = f
+ }
}
in.WantComma()
}
From 8434f3dbfc850839c759430cea9640f3c87e5f95 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 19 Sep 2024 17:00:58 +0300
Subject: [PATCH 139/705] [#1385] metabase: Use `Batch` for delete-related
operations
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/counter.go | 4 ++--
pkg/local_object_storage/metabase/delete.go | 2 +-
pkg/local_object_storage/metabase/graveyard.go | 2 +-
pkg/local_object_storage/metabase/inhume.go | 2 +-
pkg/local_object_storage/metabase/lock.go | 4 ++--
5 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go
index 275099ff2..3ead0d9a0 100644
--- a/pkg/local_object_storage/metabase/counter.go
+++ b/pkg/local_object_storage/metabase/counter.go
@@ -654,7 +654,7 @@ func (db *DB) DeleteContainerSize(ctx context.Context, id cid.ID) error {
return ErrReadOnlyMode
}
- err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
b := tx.Bucket(containerVolumeBucketName)
key := make([]byte, cidSize)
@@ -737,7 +737,7 @@ func (db *DB) DeleteContainerCount(ctx context.Context, id cid.ID) error {
return ErrReadOnlyMode
}
- err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
b := tx.Bucket(containerCounterBucketName)
key := make([]byte, cidSize)
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 00c8d06e0..e5e9840a0 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -112,7 +112,7 @@ func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
var err error
var res DeleteRes
- err = db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
res, err = db.deleteGroup(tx, prm.addrs)
return err
})
diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go
index 80d40fb78..31f95d6ed 100644
--- a/pkg/local_object_storage/metabase/graveyard.go
+++ b/pkg/local_object_storage/metabase/graveyard.go
@@ -282,7 +282,7 @@ func (db *DB) DropGraves(ctx context.Context, tss []TombstonedObject) error {
buf := make([]byte, addressKeySize)
- return db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return db.boltDB.Batch(func(tx *bbolt.Tx) error {
bkt := tx.Bucket(graveyardBucketName)
if bkt == nil {
return nil
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index b62accc43..3aae15061 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -181,7 +181,7 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
inhumedByCnrID: make(map[cid.ID]ObjectCounters),
}
currEpoch := db.epochState.CurrentEpoch()
- err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
return db.inhumeTx(tx, currEpoch, prm, &res)
})
success = err == nil
diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go
index 732ba426d..6b78ef392 100644
--- a/pkg/local_object_storage/metabase/lock.go
+++ b/pkg/local_object_storage/metabase/lock.go
@@ -78,7 +78,7 @@ func (db *DB) lockInternal(locked []oid.ID, cnr cid.ID, locker oid.ID) error {
}
key := make([]byte, cidSize)
- return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return metaerr.Wrap(db.boltDB.Batch(func(tx *bbolt.Tx) error {
if firstIrregularObjectType(tx, cnr, bucketKeysLocked...) != objectSDK.TypeRegular {
return logicerr.Wrap(new(apistatus.LockNonRegularObject))
}
@@ -143,7 +143,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
var unlockedObjects []oid.Address
- if err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ if err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
for i := range lockers {
unlocked, err := freePotentialLocks(tx, lockers[i].Container(), lockers[i].Object())
if err != nil {
From 76268e3ea2a73072119ea1963f914646c029e08a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 20 Sep 2024 13:28:21 +0300
Subject: [PATCH 140/705] [#1385] metabase: Validate that tombstone and target
have the same container ID
Target container ID is taken from tombstone: cmd/frostfs-node/object.go:507
Also object of type `TOMBSTONE` contains objectID, so tombstone and
tombstoned object must have the same containerID.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/lock_test.go | 8 +++--
.../metabase/containers_test.go | 4 +--
.../metabase/control_test.go | 2 +-
.../metabase/counter_test.go | 30 ++++++++++++-------
.../metabase/delete_test.go | 6 ++--
.../metabase/exists_test.go | 2 +-
pkg/local_object_storage/metabase/get_test.go | 3 +-
.../metabase/graveyard_test.go | 27 ++++++++++-------
pkg/local_object_storage/metabase/inhume.go | 18 +++++++++++
.../metabase/inhume_test.go | 21 +++++++++----
.../metabase/iterators_test.go | 6 ++++
.../metabase/list_test.go | 2 +-
.../metabase/lock_test.go | 10 +++++--
.../metabase/select_test.go | 6 +---
.../metabase/storage_id_test.go | 2 +-
.../shard/metrics_test.go | 13 ++++----
16 files changed, 108 insertions(+), 52 deletions(-)
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index 7fa7c27ef..9e6758fb4 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -199,7 +199,9 @@ func TestLockExpiration(t *testing.T) {
require.NoError(t, err)
var inhumePrm InhumePrm
- inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(cnr)
+ inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
var objLockedErr *apistatus.ObjectLocked
_, err = e.Inhume(context.Background(), inhumePrm)
@@ -209,7 +211,9 @@ func TestLockExpiration(t *testing.T) {
e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1)
// 4.
- inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
+ tombAddr = oidtest.Address()
+ tombAddr.SetContainer(cnr)
+ inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
require.Eventually(t, func() bool {
_, err = e.Inhume(context.Background(), inhumePrm)
diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go
index 8b1874458..110be68ad 100644
--- a/pkg/local_object_storage/metabase/containers_test.go
+++ b/pkg/local_object_storage/metabase/containers_test.go
@@ -67,7 +67,7 @@ func TestDB_Containers(t *testing.T) {
assertContains(cnrs, cnr)
- require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.Address()))
+ require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.ID()))
cnrs, err = db.Containers(context.Background())
require.NoError(t, err)
@@ -164,7 +164,7 @@ func TestDB_ContainerSize(t *testing.T) {
require.NoError(t, metaInhume(
db,
object.AddressOf(obj),
- oidtest.Address(),
+ oidtest.ID(),
))
volume -= int(obj.PayloadSize())
diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go
index 0354a5eb6..2a64881cb 100644
--- a/pkg/local_object_storage/metabase/control_test.go
+++ b/pkg/local_object_storage/metabase/control_test.go
@@ -41,7 +41,7 @@ func TestReset(t *testing.T) {
err = putBig(db, obj)
require.NoError(t, err)
- err = metaInhume(db, addrToInhume, oidtest.Address())
+ err = metaInhume(db, addrToInhume, oidtest.ID())
require.NoError(t, err)
assertExists(addr, true, nil)
diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go
index d1f808a63..dccccd456 100644
--- a/pkg/local_object_storage/metabase/counter_test.go
+++ b/pkg/local_object_storage/metabase/counter_test.go
@@ -156,13 +156,18 @@ func TestCounters(t *testing.T) {
}
var prm meta.InhumePrm
- prm.SetTombstoneAddress(oidtest.Address())
- prm.SetAddresses(inhumedObjs...)
+ for _, o := range inhumedObjs {
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(o.Container())
- res, err := db.Inhume(context.Background(), prm)
- require.NoError(t, err)
- require.Equal(t, uint64(len(inhumedObjs)), res.LogicInhumed())
- require.Equal(t, uint64(len(inhumedObjs)), res.UserInhumed())
+ prm.SetTombstoneAddress(tombAddr)
+ prm.SetAddresses(o)
+
+ res, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), res.LogicInhumed())
+ require.Equal(t, uint64(1), res.UserInhumed())
+ }
c, err := db.ObjectCounters()
require.NoError(t, err)
@@ -296,11 +301,16 @@ func TestCounters(t *testing.T) {
}
var prm meta.InhumePrm
- prm.SetTombstoneAddress(oidtest.Address())
- prm.SetAddresses(inhumedObjs...)
+ for _, o := range inhumedObjs {
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(o.Container())
- _, err := db.Inhume(context.Background(), prm)
- require.NoError(t, err)
+ prm.SetTombstoneAddress(tombAddr)
+ prm.SetAddresses(o)
+
+ _, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ }
c, err := db.ObjectCounters()
require.NoError(t, err)
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index cb85157e7..fe5f7833b 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -40,12 +40,12 @@ func TestDB_Delete(t *testing.T) {
// inhume parent and child so they will be on graveyard
ts := testutil.GenerateObjectWithCID(cnr)
- err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts))
+ err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts).Object())
require.NoError(t, err)
ts = testutil.GenerateObjectWithCID(cnr)
- err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts))
+ err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts).Object())
require.NoError(t, err)
// delete object
@@ -108,7 +108,7 @@ func TestGraveOnlyDelete(t *testing.T) {
addr := oidtest.Address()
// inhume non-existent object by address
- require.NoError(t, metaInhume(db, addr, oidtest.Address()))
+ require.NoError(t, metaInhume(db, addr, oidtest.ID()))
// delete the object data
require.NoError(t, metaDelete(db, addr))
diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go
index 0087c1e31..1e4148eba 100644
--- a/pkg/local_object_storage/metabase/exists_test.go
+++ b/pkg/local_object_storage/metabase/exists_test.go
@@ -37,7 +37,7 @@ func TestDB_Exists(t *testing.T) {
require.True(t, exists)
t.Run("removed object", func(t *testing.T) {
- err := metaInhume(db, object.AddressOf(regular), oidtest.Address())
+ err := metaInhume(db, object.AddressOf(regular), oidtest.ID())
require.NoError(t, err)
exists, err := metaExists(db, object.AddressOf(regular))
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index 7654d2cd8..f0caaea70 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -150,9 +150,8 @@ func TestDB_Get(t *testing.T) {
t.Run("get removed object", func(t *testing.T) {
obj := oidtest.Address()
- ts := oidtest.Address()
- require.NoError(t, metaInhume(db, obj, ts))
+ require.NoError(t, metaInhume(db, obj, oidtest.ID()))
_, err := metaGet(db, obj, false)
require.True(t, client.IsErrObjectAlreadyRemoved(err))
diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go
index 75c7e2852..b9c6ce28c 100644
--- a/pkg/local_object_storage/metabase/graveyard_test.go
+++ b/pkg/local_object_storage/metabase/graveyard_test.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -114,11 +115,12 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
+ cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := testutil.GenerateObject()
- obj2 := testutil.GenerateObject()
- obj3 := testutil.GenerateObject()
- obj4 := testutil.GenerateObject()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ obj3 := testutil.GenerateObjectWithCID(cnr)
+ obj4 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -138,6 +140,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
+ addrTombstone.SetContainer(cnr)
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
@@ -201,11 +204,12 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
+ cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := testutil.GenerateObject()
- obj2 := testutil.GenerateObject()
- obj3 := testutil.GenerateObject()
- obj4 := testutil.GenerateObject()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ obj3 := testutil.GenerateObjectWithCID(cnr)
+ obj4 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -223,6 +227,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
+ addrTombstone.SetContainer(cnr)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(
@@ -392,9 +397,10 @@ func TestDB_DropGraves(t *testing.T) {
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
+ cnr := cidtest.ID()
// generate and put 2 objects
- obj1 := testutil.GenerateObject()
- obj2 := testutil.GenerateObject()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -406,6 +412,7 @@ func TestDB_DropGraves(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
+ addrTombstone.SetContainer(cnr)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index 3aae15061..77bb84af1 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -143,6 +143,20 @@ func (p *InhumePrm) SetForceGCMark() {
p.forceRemoval = true
}
+func (p *InhumePrm) validate() error {
+ if p == nil {
+ return nil
+ }
+ if p.tomb != nil {
+ for _, addr := range p.target {
+ if addr.Container() != p.tomb.Container() {
+ return fmt.Errorf("object %s and tombstone %s have different container ID", addr, p.tomb)
+ }
+ }
+ }
+ return nil
+}
+
var errBreakBucketForEach = errors.New("bucket ForEach break")
// ErrLockObjectRemoval is returned when inhume operation is being
@@ -171,6 +185,10 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
+ if err := prm.validate(); err != nil {
+ return InhumeRes{}, err
+ }
+
if db.mode.NoMetabase() {
return InhumeRes{}, ErrDegradedMode
} else if db.mode.ReadOnly() {
diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go
index 163fbec2a..277316f7b 100644
--- a/pkg/local_object_storage/metabase/inhume_test.go
+++ b/pkg/local_object_storage/metabase/inhume_test.go
@@ -9,6 +9,7 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -21,12 +22,10 @@ func TestDB_Inhume(t *testing.T) {
raw := testutil.GenerateObject()
testutil.AddAttribute(raw, "foo", "bar")
- tombstoneID := oidtest.Address()
-
err := putBig(db, raw)
require.NoError(t, err)
- err = metaInhume(db, object.AddressOf(raw), tombstoneID)
+ err = metaInhume(db, object.AddressOf(raw), oidtest.ID())
require.NoError(t, err)
_, err = metaExists(db, object.AddressOf(raw))
@@ -43,13 +42,20 @@ func TestInhumeTombOnTomb(t *testing.T) {
var (
err error
+ cnr = cidtest.ID()
addr1 = oidtest.Address()
addr2 = oidtest.Address()
addr3 = oidtest.Address()
+ addr4 = oidtest.Address()
inhumePrm meta.InhumePrm
existsPrm meta.ExistsPrm
)
+ addr1.SetContainer(cnr)
+ addr2.SetContainer(cnr)
+ addr3.SetContainer(cnr)
+ addr4.SetContainer(cnr)
+
inhumePrm.SetAddresses(addr1)
inhumePrm.SetTombstoneAddress(addr2)
@@ -84,7 +90,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
require.True(t, client.IsErrObjectAlreadyRemoved(err))
inhumePrm.SetAddresses(addr1)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
+ inhumePrm.SetTombstoneAddress(addr4)
// try to inhume addr1 (which is already a tombstone in graveyard)
_, err = db.Inhume(context.Background(), inhumePrm)
@@ -117,10 +123,13 @@ func TestInhumeLocked(t *testing.T) {
require.ErrorAs(t, err, &e)
}
-func metaInhume(db *meta.DB, target, tomb oid.Address) error {
+func metaInhume(db *meta.DB, target oid.Address, tomb oid.ID) error {
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(target)
- inhumePrm.SetTombstoneAddress(tomb)
+ var tombAddr oid.Address
+ tombAddr.SetContainer(target.Container())
+ tombAddr.SetObject(tomb)
+ inhumePrm.SetTombstoneAddress(tombAddr)
_, err := db.Inhume(context.Background(), inhumePrm)
return err
diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go
index 54d56d923..777a94a6f 100644
--- a/pkg/local_object_storage/metabase/iterators_test.go
+++ b/pkg/local_object_storage/metabase/iterators_test.go
@@ -9,6 +9,7 @@ import (
object2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -71,11 +72,16 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) {
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
+ cnr := cidtest.ID()
ts := oidtest.Address()
protected1 := oidtest.Address()
protected2 := oidtest.Address()
protectedLocked := oidtest.Address()
garbage := oidtest.Address()
+ ts.SetContainer(cnr)
+ protected1.SetContainer(cnr)
+ protected2.SetContainer(cnr)
+ protectedLocked.SetContainer(cnr)
var prm meta.InhumePrm
var err error
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index 6207497b1..bc1726bd6 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -110,7 +110,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
err = putBig(db, obj)
require.NoError(t, err)
ts := testutil.GenerateObjectWithCID(containerID)
- err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts))
+ err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts).Object())
require.NoError(t, err)
// add one child object (do not include parent into expected)
diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go
index 62a109b02..9601cb2be 100644
--- a/pkg/local_object_storage/metabase/lock_test.go
+++ b/pkg/local_object_storage/metabase/lock_test.go
@@ -73,7 +73,9 @@ func TestDB_Lock(t *testing.T) {
_, err := db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(objAddr.Container())
+ inhumePrm.SetTombstoneAddress(tombAddr)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
@@ -89,7 +91,9 @@ func TestDB_Lock(t *testing.T) {
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
+ tombAddr = oidtest.Address()
+ tombAddr.SetContainer(objAddr.Container())
+ inhumePrm.SetTombstoneAddress(tombAddr)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
})
@@ -103,7 +107,7 @@ func TestDB_Lock(t *testing.T) {
var objLockedErr *apistatus.ObjectLocked
// try to inhume locked object using tombstone
- err := metaInhume(db, objAddr, lockAddr)
+ err := metaInhume(db, objAddr, lockAddr.Object())
require.ErrorAs(t, err, &objLockedErr)
// free locked object
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 6469bbdbc..fcd5d3a90 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -352,11 +352,7 @@ func TestDB_SelectInhume(t *testing.T) {
object.AddressOf(raw2),
)
- var tombstone oid.Address
- tombstone.SetContainer(cnr)
- tombstone.SetObject(oidtest.ID())
-
- err = metaInhume(db, object.AddressOf(raw2), tombstone)
+ err = metaInhume(db, object.AddressOf(raw2), oidtest.ID())
require.NoError(t, err)
fs = objectSDK.SearchFilters{}
diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go
index aaf6480ab..a86e42bd2 100644
--- a/pkg/local_object_storage/metabase/storage_id_test.go
+++ b/pkg/local_object_storage/metabase/storage_id_test.go
@@ -43,7 +43,7 @@ func TestDB_StorageID(t *testing.T) {
cnrID, ok := deleted.ContainerID()
require.True(t, ok)
ts := testutil.GenerateObjectWithCID(cnrID)
- require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts)))
+ require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts).Object()))
// check StorageID for object without storageID
fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2))
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 01a85da97..56622326a 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -17,6 +17,7 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
@@ -308,17 +309,19 @@ func TestCounters(t *testing.T) {
t.Run("inhume_TS", func(t *testing.T) {
var prm InhumePrm
- ts := objectcore.AddressOf(testutil.GenerateObject())
phy := mm.getObjectCounter(physical)
logic := mm.getObjectCounter(logical)
custom := mm.getObjectCounter(user)
inhumedNumber := int(phy / 4)
- prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...)
-
- _, err := sh.Inhume(context.Background(), prm)
- require.NoError(t, err)
+ for _, o := range addrFromObjs(oo[:inhumedNumber]) {
+ ts := oidtest.Address()
+ ts.SetContainer(o.Container())
+ prm.SetTarget(ts, o)
+ _, err := sh.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ }
for i := range inhumedNumber {
cid, ok := oo[i].ContainerID()
From fd18aa363b7b33f8b662f9b4bffaf9f3099216a6 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 20 Sep 2024 13:32:05 +0300
Subject: [PATCH 141/705] [#1385] metabase: Optimize `isTomb` check
As tombstone and target must have the same containerID, do not iterate
other containers.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/inhume.go | 29 ++++++++-------------
1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index 77bb84af1..12f27d330 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -377,11 +377,8 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
return targetBucket, value, nil
}
-func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool, error) {
- targetIsTomb, err := isTomb(graveyardBKT, key)
- if err != nil {
- return false, err
- }
+func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, addressKey []byte) (bool, error) {
+ targetIsTomb := isTomb(graveyardBKT, addressKey)
// do not add grave if target is a tombstone
if targetIsTomb {
@@ -390,7 +387,7 @@ func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool
// if tombstone appears object must be
// additionally marked with GC
- return false, garbageBKT.Put(key, zeroValue)
+ return false, garbageBKT.Put(addressKey, zeroValue)
}
func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Bucket, targetKey []byte, cnr cid.ID, obj *objectSDK.Object, res *InhumeRes) error {
@@ -410,25 +407,21 @@ func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Buc
return nil
}
-func isTomb(graveyardBucket *bbolt.Bucket, key []byte) (bool, error) {
+func isTomb(graveyardBucket *bbolt.Bucket, addressKey []byte) bool {
targetIsTomb := false
// iterate over graveyard and check if target address
// is the address of tombstone in graveyard.
- err := graveyardBucket.ForEach(func(_, v []byte) error {
+ // tombstone must have the same container ID as key.
+ c := graveyardBucket.Cursor()
+ containerPrefix := addressKey[:cidSize]
+ for k, v := c.Seek(containerPrefix); k != nil && bytes.HasPrefix(k, containerPrefix); k, v = c.Next() {
// check if graveyard has record with key corresponding
// to tombstone address (at least one)
- targetIsTomb = bytes.Equal(v, key)
-
+ targetIsTomb = bytes.Equal(v, addressKey)
if targetIsTomb {
- // break bucket iterator
- return errBreakBucketForEach
+ break
}
-
- return nil
- })
- if err != nil && !errors.Is(err, errBreakBucketForEach) {
- return false, err
}
- return targetIsTomb, nil
+ return targetIsTomb
}
From 95597d34371db6555739c4e92640cd8f8862ee7e Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 08:56:02 +0300
Subject: [PATCH 142/705] [#1388] golangci: Make `unused` linter stricter
Add additional checks. The most important false positive - structs used as
map keys.
Signed-off-by: Dmitrii Stepanov
---
.golangci.yml | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/.golangci.yml b/.golangci.yml
index 971f0d0e7..33cf88d8a 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -38,6 +38,10 @@ linters-settings:
alias:
pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
alias: objectSDK
+ unused:
+ field-writes-are-uses: false
+ exported-fields-are-used: false
+ local-variables-are-used: false
custom:
truecloudlab-linters:
path: bin/linters/external_linters.so
From 2bd560e52846b77d2902370cfaa80d54fcd77c46 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 08:57:18 +0300
Subject: [PATCH 143/705] [#1388] cli: Drop unused flag/parameter
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/internal/client/client.go | 7 -------
cmd/frostfs-cli/modules/object/head.go | 3 ---
2 files changed, 10 deletions(-)
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
index 03a987a57..dcd67f0d9 100644
--- a/cmd/frostfs-cli/internal/client/client.go
+++ b/cmd/frostfs-cli/internal/client/client.go
@@ -565,13 +565,6 @@ type HeadObjectPrm struct {
commonObjectPrm
objectAddressPrm
rawPrm
-
- mainOnly bool
-}
-
-// SetMainOnlyFlag sets flag to get only main fields of an object header in terms of FrostFS API.
-func (x *HeadObjectPrm) SetMainOnlyFlag(v bool) {
- x.mainOnly = v
}
// HeadObjectRes groups the resulting values of HeadObject operation.
diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go
index 14797dc41..cf2e2d5e6 100644
--- a/cmd/frostfs-cli/modules/object/head.go
+++ b/cmd/frostfs-cli/modules/object/head.go
@@ -38,7 +38,6 @@ func initObjectHeadCmd() {
_ = objectHeadCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.String(fileFlag, "", "File to write header to. Default: stdout.")
- flags.Bool("main-only", false, "Return only main fields")
flags.Bool(commonflags.JSON, false, "Marshal output in JSON")
flags.Bool("proto", false, "Marshal output in Protobuf")
flags.Bool(rawFlag, false, rawFlagDesc)
@@ -49,7 +48,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
var obj oid.ID
objAddr := readObjectAddress(cmd, &cnr, &obj)
- mainOnly, _ := cmd.Flags().GetBool("main-only")
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
@@ -62,7 +60,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
raw, _ := cmd.Flags().GetBool(rawFlag)
prm.SetRawFlag(raw)
prm.SetAddress(objAddr)
- prm.SetMainOnlyFlag(mainOnly)
res, err := internalclient.HeadObject(cmd.Context(), prm)
if err != nil {
From b69e07da7af2c8167e02585a723008fa2753f848 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:02:00 +0300
Subject: [PATCH 144/705] [#1388] metrics: Mark nolint:unused metrics
Although these fields could be deleted, I annotated them so that all the
metrics used would be defined in one place.
Signed-off-by: Dmitrii Stepanov
---
internal/metrics/innerring.go | 3 ++-
internal/metrics/node.go | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/internal/metrics/innerring.go b/internal/metrics/innerring.go
index f6b14a632..f3f529d05 100644
--- a/internal/metrics/innerring.go
+++ b/internal/metrics/innerring.go
@@ -17,7 +17,8 @@ type InnerRingServiceMetrics struct {
eventDuration *prometheus.HistogramVec
morphCacheMetrics *morphCacheMetrics
logMetrics logger.LogMetrics
- appInfo *ApplicationInfo
+ // nolint: unused
+ appInfo *ApplicationInfo
}
// NewInnerRingMetrics returns new instance of metrics collectors for inner ring.
diff --git a/internal/metrics/node.go b/internal/metrics/node.go
index d9e401446..711387875 100644
--- a/internal/metrics/node.go
+++ b/internal/metrics/node.go
@@ -25,7 +25,8 @@ type NodeMetrics struct {
morphClient *morphClientMetrics
morphCache *morphCacheMetrics
log logger.LogMetrics
- appInfo *ApplicationInfo
+ // nolint: unused
+ appInfo *ApplicationInfo
}
func NewNodeMetrics() *NodeMetrics {
From aedb55f913d151669885aa6bc8ea5e83269a60b0 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:06:19 +0300
Subject: [PATCH 145/705] [#1388] governance: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/innerring/initialization.go | 1 -
.../processors/governance/handlers_test.go | 21 -------------------
.../processors/governance/processor.go | 3 ---
3 files changed, 25 deletions(-)
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index 7da0a9794..c4aaeda56 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -163,7 +163,6 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
Log: s.log,
Metrics: s.irMetrics,
FrostFSClient: frostfsCli,
- NetmapClient: s.netmapClient,
AlphabetState: s,
EpochState: s,
Voter: s,
diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go
index b73e24318..87040bdef 100644
--- a/pkg/innerring/processors/governance/handlers_test.go
+++ b/pkg/innerring/processors/governance/handlers_test.go
@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
- nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
@@ -38,7 +37,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
- nm := &testNetmapClient{}
proc, err := New(
&Params{
@@ -50,7 +48,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
- NetmapClient: nm,
},
)
@@ -73,10 +70,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
- var irUpdateExp []nmClient.UpdateIRPrm
-
- require.EqualValues(t, irUpdateExp, nm.updates, "invalid IR updates")
-
var expAlphabetUpdate client.UpdateAlphabetListPrm
expAlphabetUpdate.SetHash(ev.txHash)
expAlphabetUpdate.SetList(testKeys.newInnerRingExp)
@@ -119,7 +112,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
- nm := &testNetmapClient{}
proc, err := New(
&Params{
@@ -131,7 +123,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
- NetmapClient: nm,
},
)
@@ -155,9 +146,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
- var irUpdatesExp []nmClient.UpdateIRPrm
- require.EqualValues(t, irUpdatesExp, nm.updates, "invalid IR updates")
-
var alpabetUpdExp client.UpdateAlphabetListPrm
alpabetUpdExp.SetList(testKeys.newInnerRingExp)
alpabetUpdExp.SetHash(ev.TxHash)
@@ -293,12 +281,3 @@ func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm)
c.updates = append(c.updates, p)
return nil
}
-
-type testNetmapClient struct {
- updates []nmClient.UpdateIRPrm
-}
-
-func (c *testNetmapClient) UpdateInnerRing(p nmClient.UpdateIRPrm) error {
- c.updates = append(c.updates, p)
- return nil
-}
diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go
index fa267eade..6daea417e 100644
--- a/pkg/innerring/processors/governance/processor.go
+++ b/pkg/innerring/processors/governance/processor.go
@@ -79,7 +79,6 @@ type (
metrics metrics.Register
pool *ants.Pool
frostfsClient FrostFSClient
- netmapClient NetmapClient
alphabetState AlphabetState
epochState EpochState
@@ -105,7 +104,6 @@ type (
MorphClient MorphClient
MainnetClient MainnetClient
FrostFSClient FrostFSClient
- NetmapClient NetmapClient
}
)
@@ -146,7 +144,6 @@ func New(p *Params) (*Processor, error) {
metrics: metricsRegister,
pool: pool,
frostfsClient: p.FrostFSClient,
- netmapClient: p.NetmapClient,
alphabetState: p.AlphabetState,
epochState: p.EpochState,
voter: p.Voter,
From e319bf403e7ddd24d9527829a9d5863643635ff8 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:08:11 +0300
Subject: [PATCH 146/705] [#1388] apeSvc: Drop unused and make annotations
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 1 -
cmd/frostfs-node/object.go | 1 -
cmd/frostfs-node/policy_engine.go | 4 +++-
pkg/ape/chainbase/option.go | 10 ----------
pkg/services/object/ape/service.go | 6 +-----
5 files changed, 4 insertions(+), 18 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 63f410b89..0ffa8c45b 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1082,7 +1082,6 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
} else {
localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
- chainbase.WithLogger(c.log),
chainbase.WithPath(nodeconfig.PersistentPolicyRules(c.appCfg).Path()),
chainbase.WithPerm(nodeconfig.PersistentPolicyRules(c.appCfg).Perm()),
chainbase.WithNoSync(nodeconfig.PersistentPolicyRules(c.appCfg).NoSync()),
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 610e2c363..9d4e35ca8 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -473,7 +473,6 @@ func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFe
func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
return objectAPE.NewService(
- c.log,
objectAPE.NewChecker(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
diff --git a/cmd/frostfs-node/policy_engine.go b/cmd/frostfs-node/policy_engine.go
index 22fda2b4c..55f76cc76 100644
--- a/cmd/frostfs-node/policy_engine.go
+++ b/cmd/frostfs-node/policy_engine.go
@@ -21,7 +21,9 @@ type accessPolicyEngine struct {
var _ engine.MorphRuleChainStorageReader = (*morphAPEChainCache)(nil)
type morphAPEChainCacheKey struct {
- name chain.Name
+ // nolint:unused
+ name chain.Name
+ // nolint:unused
target engine.Target
}
diff --git a/pkg/ape/chainbase/option.go b/pkg/ape/chainbase/option.go
index e547701fb..590b7a885 100644
--- a/pkg/ape/chainbase/option.go
+++ b/pkg/ape/chainbase/option.go
@@ -5,9 +5,7 @@ import (
"os"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.etcd.io/bbolt"
- "go.uber.org/zap"
)
type Option func(*cfg)
@@ -18,7 +16,6 @@ type cfg struct {
noSync bool
maxBatchDelay time.Duration
maxBatchSize int
- log *logger.Logger
}
func defaultCfg() *cfg {
@@ -26,7 +23,6 @@ func defaultCfg() *cfg {
perm: os.ModePerm,
maxBatchDelay: bbolt.DefaultMaxBatchDelay,
maxBatchSize: bbolt.DefaultMaxBatchSize,
- log: &logger.Logger{Logger: zap.L()},
}
}
@@ -59,9 +55,3 @@ func WithMaxBatchSize(maxBatchSize int) Option {
c.maxBatchSize = maxBatchSize
}
}
-
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- }
-}
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
index a1634e7c5..6eedaf99e 100644
--- a/pkg/services/object/ape/service.go
+++ b/pkg/services/object/ape/service.go
@@ -12,7 +12,6 @@ import (
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -24,8 +23,6 @@ import (
var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext")
type Service struct {
- log *logger.Logger
-
apeChecker Checker
next objectSvc.ServiceServer
@@ -67,9 +64,8 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service)
}
}
-func NewService(log *logger.Logger, apeChecker Checker, next objectSvc.ServiceServer) *Service {
+func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service {
return &Service{
- log: log,
apeChecker: apeChecker,
next: next,
}
From 580cd551807cea0ad2b9dfe9fbd21da0b55d6282 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:10:03 +0300
Subject: [PATCH 147/705] [#1388] getSvc: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/assembleec.go | 2 +-
pkg/services/object/get/assemblerec.go | 3 ---
2 files changed, 1 insertion(+), 4 deletions(-)
diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go
index a58602bf7..03f913bbf 100644
--- a/pkg/services/object/get/assembleec.go
+++ b/pkg/services/object/get/assembleec.go
@@ -43,7 +43,7 @@ func (r *request) assembleEC(ctx context.Context) {
}
r.prm.common = r.prm.common.WithLocalOnly(false)
- assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.containerSource, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
+ assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
r.log.Debug(logs.GetAssemblingECObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index dde0d7dad..44d9af3a2 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -34,7 +34,6 @@ type assemblerec struct {
rng *objectSDK.Range
remoteStorage ecRemoteStorage
localStorage localStorage
- cs container.Source
log *logger.Logger
head bool
traverserGenerator traverserGenerator
@@ -47,7 +46,6 @@ func newAssemblerEC(
rng *objectSDK.Range,
remoteStorage ecRemoteStorage,
localStorage localStorage,
- cs container.Source,
log *logger.Logger,
head bool,
tg traverserGenerator,
@@ -59,7 +57,6 @@ func newAssemblerEC(
ecInfo: ecInfo,
remoteStorage: remoteStorage,
localStorage: localStorage,
- cs: cs,
log: log,
head: head,
traverserGenerator: tg,
From 63a567a1de8d40b87c5e0cfcb99235eb4079f059 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:10:38 +0300
Subject: [PATCH 148/705] [#1388] engine: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/control.go | 14 --------------
1 file changed, 14 deletions(-)
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index 4778cf539..80fb3f9ed 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -249,23 +249,9 @@ func (e *StorageEngine) ResumeExecution() error {
}
type ReConfiguration struct {
- errorsThreshold uint32
- shardPoolSize uint32
-
shards map[string][]shard.Option // meta path -> shard opts
}
-// SetErrorsThreshold sets a size amount of errors after which
-// shard is moved to read-only mode.
-func (rCfg *ReConfiguration) SetErrorsThreshold(errorsThreshold uint32) {
- rCfg.errorsThreshold = errorsThreshold
-}
-
-// SetShardPoolSize sets a size of worker pool for each shard.
-func (rCfg *ReConfiguration) SetShardPoolSize(shardPoolSize uint32) {
- rCfg.shardPoolSize = shardPoolSize
-}
-
// AddShard adds a shard for the reconfiguration.
// Shard identifier is calculated from paths used in blobstor.
func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) {
From 004ff9e9bf68174fbb64df6cbc81f98ced8755d3 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:11:15 +0300
Subject: [PATCH 149/705] [#1388] blobstor: Drop unused
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/memstore/control.go | 16 ++++++++--------
.../blobstor/memstore/memstore_test.go | 2 --
.../blobstor/memstore/option.go | 15 +--------------
3 files changed, 9 insertions(+), 24 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go
index 449d4352a..83da52eb7 100644
--- a/pkg/local_object_storage/blobstor/memstore/control.go
+++ b/pkg/local_object_storage/blobstor/memstore/control.go
@@ -10,11 +10,11 @@ func (s *memstoreImpl) Open(mod mode.ComponentMode) error {
return nil
}
-func (s *memstoreImpl) Init() error { return nil }
-func (s *memstoreImpl) Close() error { return nil }
-func (s *memstoreImpl) Type() string { return Type }
-func (s *memstoreImpl) Path() string { return s.rootPath }
-func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
-func (s *memstoreImpl) Compressor() *compression.Config { return s.compression }
-func (s *memstoreImpl) SetReportErrorFunc(f func(string, error)) { s.reportError = f }
-func (s *memstoreImpl) SetParentID(string) {}
+func (s *memstoreImpl) Init() error { return nil }
+func (s *memstoreImpl) Close() error { return nil }
+func (s *memstoreImpl) Type() string { return Type }
+func (s *memstoreImpl) Path() string { return s.rootPath }
+func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
+func (s *memstoreImpl) Compressor() *compression.Config { return s.compression }
+func (s *memstoreImpl) SetReportErrorFunc(func(string, error)) {}
+func (s *memstoreImpl) SetParentID(string) {}
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
index 8d1480dff..dd130e5f9 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
)
@@ -16,7 +15,6 @@ import (
func TestSimpleLifecycle(t *testing.T) {
s := New(
WithRootPath("memstore"),
- WithLogger(test.NewLogger(t)),
)
defer func() { require.NoError(t, s.Close()) }()
require.NoError(t, s.Open(mode.ComponentReadWrite))
diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go
index 3d67b1e9c..97a03993d 100644
--- a/pkg/local_object_storage/blobstor/memstore/option.go
+++ b/pkg/local_object_storage/blobstor/memstore/option.go
@@ -2,33 +2,20 @@ package memstore
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
)
type cfg struct {
- log *logger.Logger
rootPath string
readOnly bool
compression *compression.Config
- reportError func(string, error)
}
func defaultConfig() *cfg {
- return &cfg{
- log: &logger.Logger{Logger: zap.L()},
- reportError: func(string, error) {},
- }
+ return &cfg{}
}
type Option func(*cfg)
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- }
-}
-
func WithRootPath(p string) Option {
return func(c *cfg) {
c.rootPath = p
From 401c398704f15c1d516fbcc04f842d9d3fb8c2d3 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:12:01 +0300
Subject: [PATCH 150/705] [#1388] metabase: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/delete.go | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index e5e9840a0..4ad11164f 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -77,8 +77,6 @@ func (p *DeletePrm) SetAddresses(addrs ...oid.Address) {
type referenceNumber struct {
all, cur int
- addr oid.Address
-
obj *objectSDK.Object
}
@@ -295,9 +293,8 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
nRef, ok := refCounter[k]
if !ok {
nRef = &referenceNumber{
- all: parentLength(tx, parAddr),
- addr: parAddr,
- obj: parent,
+ all: parentLength(tx, parAddr),
+ obj: parent,
}
refCounter[k] = nRef
From d1d6e3471c2e902c29480a091545f09c4daaf335 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:12:32 +0300
Subject: [PATCH 151/705] [#1388] signSvc: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/sign.go | 3 ---
1 file changed, 3 deletions(-)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index 35367aafe..f5ae97b62 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -10,8 +10,6 @@ import (
)
type SignService struct {
- key *ecdsa.PrivateKey
-
sigSvc *util.SignService
svc ServiceServer
@@ -48,7 +46,6 @@ type getRangeStreamSigner struct {
func NewSignService(key *ecdsa.PrivateKey, svc ServiceServer) *SignService {
return &SignService{
- key: key,
sigSvc: util.NewUnarySignService(key),
svc: svc,
}
From bdd57c8b6b03f78ed74c31db41f5bbd0f3c84beb Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:12:54 +0300
Subject: [PATCH 152/705] [#1388] sessionSvc: Add nolint annotations
Used as map key.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/session/storage/temporary/storage.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go
index ee93dee71..9ae9db9dc 100644
--- a/pkg/services/session/storage/temporary/storage.go
+++ b/pkg/services/session/storage/temporary/storage.go
@@ -9,7 +9,9 @@ import (
)
type key struct {
+ // nolint:unused
tokenID string
+ // nolint:unused
ownerID string
}
From a2ab6d4942046c3bca59addd2b73ce3b58251b84 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:13:27 +0300
Subject: [PATCH 153/705] [#1388] node: Drop unused
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 1 -
cmd/frostfs-node/container.go | 6 ------
cmd/frostfs-node/netmap.go | 1 -
3 files changed, 8 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 0ffa8c45b..c625b575f 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -602,7 +602,6 @@ type cfgNetmap struct {
needBootstrap bool
reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime
- startEpoch uint64 // epoch number when application is started
}
type cfgNodeInfo struct {
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index 5a29aac76..6733140d2 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -128,9 +128,6 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
cnrRdr.lister = client
cnrRdr.eacl = c.cfgObject.eaclSource
cnrRdr.src = c.cfgObject.cnrSource
-
- cnrWrt.cacheEnabled = true
- cnrWrt.eacls = cachedEACLStorage
}
return cnrRdr, cnrWrt
@@ -247,9 +244,6 @@ func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
type morphContainerWriter struct {
neoClient *cntClient.Client
-
- cacheEnabled bool
- eacls ttlEACLStorage
}
func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) {
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index c0b87492c..5e4585f85 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -259,7 +259,6 @@ func initNetmapState(c *cfg) {
}
c.cfgNetmap.state.setCurrentEpoch(epoch)
- c.cfgNetmap.startEpoch = epoch
c.setContractNodeInfo(ni)
}
From 29e4cf7ba1c88552172bdbb19dade34ea9ff5ba2 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 10:51:05 +0300
Subject: [PATCH 154/705] [#1388] ir: Annotate cmode as nolint
Signed-off-by: Dmitrii Stepanov
---
pkg/innerring/innerring.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go
index 50a37845b..53a07e36c 100644
--- a/pkg/innerring/innerring.go
+++ b/pkg/innerring/innerring.go
@@ -103,6 +103,8 @@ type (
// to the application.
runners []func(chan<- error) error
+ // cmode used for upgrade scenario.
+ // nolint:unused
cmode *atomic.Bool
}
From 4fbfffd44c4e0f4aa7bc88052eff8400a0421f7c Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 24 Sep 2024 12:13:11 +0300
Subject: [PATCH 155/705] [#1388] putSvc: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/prm.go | 11 -----------
1 file changed, 11 deletions(-)
diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go
index 0c8f12b45..52a7c102c 100644
--- a/pkg/services/object/put/prm.go
+++ b/pkg/services/object/put/prm.go
@@ -2,7 +2,6 @@ package putsvc
import (
"context"
- "crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -21,8 +20,6 @@ type PutInitPrm struct {
traverseOpts []placement.Option
relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
-
- privateKey *ecdsa.PrivateKey
}
type PutChunkPrm struct {
@@ -68,11 +65,3 @@ func (p *PutChunkPrm) WithChunk(v []byte) *PutChunkPrm {
return p
}
-
-func (p *PutInitPrm) WithPrivateKey(v *ecdsa.PrivateKey) *PutInitPrm {
- if p != nil {
- p.privateKey = v
- }
-
- return p
-}
From 772b471aab53774e1d2cf11ae7db28166a47ec45 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 24 Sep 2024 15:58:52 +0300
Subject: [PATCH 156/705] [#1388] lens: Add nolint annotations
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-lens/internal/schema/common/raw.go | 2 ++
cmd/frostfs-lens/internal/schema/writecache/types.go | 2 ++
2 files changed, 4 insertions(+)
diff --git a/cmd/frostfs-lens/internal/schema/common/raw.go b/cmd/frostfs-lens/internal/schema/common/raw.go
index 0990e24c3..55051554c 100644
--- a/cmd/frostfs-lens/internal/schema/common/raw.go
+++ b/cmd/frostfs-lens/internal/schema/common/raw.go
@@ -7,6 +7,8 @@ import (
)
type RawEntry struct {
+ // key and value used for record dump.
+ // nolint:unused
key, value []byte
}
diff --git a/cmd/frostfs-lens/internal/schema/writecache/types.go b/cmd/frostfs-lens/internal/schema/writecache/types.go
index 3f71c5366..11e6f3fcd 100644
--- a/cmd/frostfs-lens/internal/schema/writecache/types.go
+++ b/cmd/frostfs-lens/internal/schema/writecache/types.go
@@ -16,6 +16,8 @@ type (
DefaultRecord struct {
addr oid.Address
+ // data used for record dump.
+ // nolint:unused
data []byte
}
)
From a5e1aa22c963fe612d6d2d3316ee7ca0482f0d09 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 25 Sep 2024 17:15:03 +0300
Subject: [PATCH 157/705] [#1394] putSvc: Fix relay
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/common/target/target.go | 9 +++++----
pkg/services/object/patch/streamer.go | 2 +-
pkg/services/object/put/streamer.go | 2 +-
3 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
index 980c4c6bd..a2d6b4d39 100644
--- a/pkg/services/object/common/target/target.go
+++ b/pkg/services/object/common/target/target.go
@@ -13,16 +13,16 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
-func New(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+func New(prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
// prepare needed put parameters
- if err := preparePrm(prm); err != nil {
+ if err := preparePrm(&prm); err != nil {
return nil, fmt.Errorf("could not prepare put parameters: %w", err)
}
if prm.Header.Signature() != nil {
- return newUntrustedTarget(prm)
+ return newUntrustedTarget(&prm)
}
- return newTrustedTarget(prm)
+ return newTrustedTarget(&prm)
}
func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
@@ -49,6 +49,7 @@ func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWrit
}
func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ prm.Relay = nil // do not relay request without signature
maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
if maxPayloadSz == 0 {
return nil, errors.New("could not obtain max object size parameter")
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index 73def8c7c..c8ed6fdbf 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -112,7 +112,7 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
}
oV2.GetHeader().SetOwnerID(ownerID)
- target, err := target.New(&objectwriter.Params{
+ target, err := target.New(objectwriter.Params{
Config: s.Config,
Common: commonPrm,
Header: objectSDK.NewFromV2(oV2),
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index d08e7fafa..f71309d31 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -26,7 +26,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
}
// initialize destination target
- prmTarget := &objectwriter.Params{
+ prmTarget := objectwriter.Params{
Config: p.Config,
Common: prm.common,
Header: prm.hdr,
From 5f22ba6f380fd9d41be070f000b10cc4432981b9 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Fri, 27 Sep 2024 13:45:57 +0300
Subject: [PATCH 158/705] [#1397] object: Correctly set namespace before APE
check
Signed-off-by: Airat Arifullin
---
pkg/services/object/ape/checker.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go
index 3688638d0..3f6cc7c20 100644
--- a/pkg/services/object/ape/checker.go
+++ b/pkg/services/object/ape/checker.go
@@ -108,7 +108,7 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
return c.checkerCore.CheckAPE(checkercore.CheckPrm{
Request: r,
PublicKey: pub,
- Namespace: prm.Method,
+ Namespace: prm.Namespace,
Container: prm.Container,
ContainerOwner: prm.ContainerOwner,
BearerToken: prm.BearerToken,
From d0ed29b3c73626f6bf881090f86bdc834d81acc1 Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Tue, 3 Sep 2024 15:42:38 +0300
Subject: [PATCH 159/705] [#1350] node: Add ability to evacuate objects from
`REP 1` only
Signed-off-by: Anton Nikiforov
---
cmd/frostfs-cli/modules/control/evacuation.go | 4 +
docs/evacuation.md | 7 +-
pkg/local_object_storage/engine/evacuate.go | 39 +++-
.../engine/evacuate_test.go | 181 +++++++++++++++++-
pkg/local_object_storage/metabase/list.go | 51 +++++
pkg/local_object_storage/shard/list.go | 30 ++-
pkg/services/control/server/evacuate_async.go | 1 +
pkg/services/control/service.proto | 2 +
pkg/services/control/service_frostfs.pb.go | 31 +++
9 files changed, 340 insertions(+), 6 deletions(-)
diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go
index 04a67e5b5..fffc5e33e 100644
--- a/cmd/frostfs-cli/modules/control/evacuation.go
+++ b/cmd/frostfs-cli/modules/control/evacuation.go
@@ -20,6 +20,7 @@ const (
awaitFlag = "await"
noProgressFlag = "no-progress"
scopeFlag = "scope"
+ repOneOnlyFlag = "rep-one-only"
containerWorkerCountFlag = "container-worker-count"
objectWorkerCountFlag = "object-worker-count"
@@ -69,6 +70,7 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) {
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
containerWorkerCount, _ := cmd.Flags().GetUint32(containerWorkerCountFlag)
objectWorkerCount, _ := cmd.Flags().GetUint32(objectWorkerCountFlag)
+ repOneOnly, _ := cmd.Flags().GetBool(repOneOnlyFlag)
req := &control.StartShardEvacuationRequest{
Body: &control.StartShardEvacuationRequest_Body{
@@ -77,6 +79,7 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) {
Scope: getEvacuationScope(cmd),
ContainerWorkerCount: containerWorkerCount,
ObjectWorkerCount: objectWorkerCount,
+ RepOneOnly: repOneOnly,
},
}
@@ -380,6 +383,7 @@ func initControlStartEvacuationShardCmd() {
flags.Bool(noProgressFlag, false, fmt.Sprintf("Print progress if %s provided", awaitFlag))
flags.Uint32(containerWorkerCountFlag, 0, "Count of concurrent container evacuation workers")
flags.Uint32(objectWorkerCountFlag, 0, "Count of concurrent object evacuation workers")
+ flags.Bool(repOneOnlyFlag, false, "Evacuate objects only from containers with policy 'REP 1 ...'")
startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
diff --git a/docs/evacuation.md b/docs/evacuation.md
index 885ce169a..d47d56d15 100644
--- a/docs/evacuation.md
+++ b/docs/evacuation.md
@@ -20,7 +20,12 @@ Because it is necessary to prevent removing by policer objects with policy `REP
## Commands
-`frostfs-cli control shards evacuation start` starts evacuation process for shards specified. To start evacuating all node shards, use the `--all` flag. By default, objects and trees are evacuated. To limit the evacuation scope, use `--scope` flag (possible values are `all`, `trees`, `objects`).
+`frostfs-cli control shards evacuation start` starts evacuation process for shards specified. To start evacuating all node shards, use the `--all` flag.
+By default, objects and trees are evacuated. To limit the evacuation scope, use `--scope` flag (possible values are `all`, `trees`, `objects`).
+To evacuate objects only from containers with policy `REP 1` use option `--rep-one-only`.
+To adjust resource consumption required for evacuation use options:
+ - `--container-worker-count` count of concurrent container evacuation workers
+ - `--object-worker-count` count of concurrent object evacuation workers
`frostfs-cli control shards evacuation stop` stops running evacuation process.
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 3db556a8f..a618ff274 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -9,6 +9,7 @@ import (
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
@@ -16,6 +17,7 @@ import (
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -88,6 +90,7 @@ type EvacuateShardPrm struct {
IgnoreErrors bool
Async bool
Scope EvacuateScope
+ RepOneOnly bool
ContainerWorkerCount uint32
ObjectWorkerCount uint32
@@ -288,6 +291,7 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
attribute.Bool("async", prm.Async),
attribute.Bool("ignoreErrors", prm.IgnoreErrors),
attribute.Stringer("scope", prm.Scope),
+ attribute.Bool("repOneOnly", prm.RepOneOnly),
))
defer func() {
@@ -430,13 +434,34 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
) error {
sh := shardsToEvacuate[shardID]
var cntPrm shard.IterateOverContainersPrm
- cntPrm.Handler = func(ctx context.Context, name []byte, _ cid.ID) error {
+ cntPrm.Handler = func(ctx context.Context, name []byte, cnt cid.ID) error {
select {
case <-ctx.Done():
return context.Cause(ctx)
default:
}
egContainer.Go(func() error {
+ var skip bool
+ c, err := e.containerSource.Load().cs.Get(cnt)
+ if err != nil {
+ if client.IsErrContainerNotFound(err) {
+ skip = true
+ } else {
+ return err
+ }
+ }
+ if !skip && prm.RepOneOnly {
+ skip = e.isNotRepOne(c)
+ }
+ if skip {
+ countPrm := shard.CountAliveObjectsInBucketPrm{BucketName: name}
+ count, err := sh.CountAliveObjectsInBucket(ctx, countPrm)
+ if err != nil {
+ return err
+ }
+ res.objSkipped.Add(count)
+ return nil
+ }
var objPrm shard.IterateOverObjectsInContainerPrm
objPrm.BucketName = name
objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error {
@@ -454,7 +479,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
})
return nil
}
- err := sh.IterateOverObjectsInContainer(ctx, objPrm)
+ err = sh.IterateOverObjectsInContainer(ctx, objPrm)
if err != nil {
cancel(err)
}
@@ -781,6 +806,16 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI
return nil
}
+func (e *StorageEngine) isNotRepOne(c *container.Container) bool {
+ p := c.Value.PlacementPolicy()
+ for i := range p.NumberOfReplicas() {
+ if p.ReplicaDescriptor(i).NumberOfObjects() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes,
) (bool, error) {
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index f72333399..8498c9245 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -11,6 +11,7 @@ import (
"testing"
"time"
+ coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
@@ -20,14 +21,38 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
+type containerStorage struct {
+ cntmap map[cid.ID]*container.Container
+ latency time.Duration
+}
+
+func (cs *containerStorage) Get(id cid.ID) (*coreContainer.Container, error) {
+ time.Sleep(cs.latency)
+ v, ok := cs.cntmap[id]
+ if !ok {
+ return nil, new(apistatus.ContainerNotFound)
+ }
+ coreCnt := coreContainer.Container{
+ Value: *v,
+ }
+ return &coreCnt, nil
+}
+
+func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) {
+ return nil, nil
+}
+
func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) {
dir := t.TempDir()
@@ -61,10 +86,15 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
{Key: pilorama.AttributeVersion, Value: []byte("XXX")},
{Key: pilorama.AttributeFilename, Value: []byte("file.txt")},
}
-
+ cnrMap := make(map[cid.ID]*container.Container)
for _, sh := range ids {
- for range objPerShard {
+ for i := range objPerShard {
+ // Create dummy container
+ cnr1 := container.Container{}
+ cnr1.SetAttribute("cnr", "cnr"+strconv.Itoa(i))
contID := cidtest.ID()
+ cnrMap[contID] = &cnr1
+
obj := testutil.GenerateObjectWithCID(contID)
objects = append(objects, obj)
@@ -78,6 +108,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
require.NoError(t, err)
}
}
+ e.SetContainerSource(&containerStorage{cntmap: cnrMap})
return e, ids, objects
}
@@ -177,7 +208,10 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) {
var n atomic.Uint64
+ var mtx sync.Mutex
return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
+ mtx.Lock()
+ defer mtx.Unlock()
if n.Load() == max {
return false, errReplication
}
@@ -640,3 +674,146 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.Equal(t, expectedTreeOps, evacuatedTreeOps)
}
+
+func TestEvacuateShardObjectsRepOneOnly(t *testing.T) {
+ e, ids, _ := newEngineEvacuate(t, 2, 0)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ // Create container with policy REP 2
+ cnr1 := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(2)
+ p1.AddReplicas(x1)
+ x1 = netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(1)
+ p1.AddReplicas(x1)
+ cnr1.SetPlacementPolicy(p1)
+ cnr1.SetAttribute("cnr", "cnr1")
+
+ var idCnr1 cid.ID
+ container.CalculateID(&idCnr1, cnr1)
+
+ cnrmap := make(map[cid.ID]*container.Container)
+ var cids []cid.ID
+ cnrmap[idCnr1] = &cnr1
+ cids = append(cids, idCnr1)
+
+ // Create container with policy REP 1
+ cnr2 := container.Container{}
+ p2 := netmap.PlacementPolicy{}
+ p2.SetContainerBackupFactor(1)
+ x2 := netmap.ReplicaDescriptor{}
+ x2.SetNumberOfObjects(1)
+ p2.AddReplicas(x2)
+ x2 = netmap.ReplicaDescriptor{}
+ x2.SetNumberOfObjects(1)
+ p2.AddReplicas(x2)
+ cnr2.SetPlacementPolicy(p2)
+ cnr2.SetAttribute("cnr", "cnr2")
+
+ var idCnr2 cid.ID
+ container.CalculateID(&idCnr2, cnr2)
+ cnrmap[idCnr2] = &cnr2
+ cids = append(cids, idCnr2)
+
+ // Create container to simulate removal
+ cnr3 := container.Container{}
+ p3 := netmap.PlacementPolicy{}
+ p3.SetContainerBackupFactor(1)
+ x3 := netmap.ReplicaDescriptor{}
+ x3.SetNumberOfObjects(1)
+ p3.AddReplicas(x3)
+ cnr3.SetPlacementPolicy(p3)
+ cnr3.SetAttribute("cnr", "cnr3")
+
+ var idCnr3 cid.ID
+ container.CalculateID(&idCnr3, cnr3)
+ cids = append(cids, idCnr3)
+
+ e.SetContainerSource(&containerStorage{cntmap: cnrmap})
+
+ for _, sh := range ids {
+ for j := range 3 {
+ for range 4 {
+ obj := testutil.GenerateObjectWithCID(cids[j])
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ _, err := e.shards[sh.String()].Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ }
+ }
+ }
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[0:1]
+ prm.Scope = EvacuateScopeObjects
+ prm.RepOneOnly = true
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+
+ res, err := e.Evacuate(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(4), res.ObjectsEvacuated())
+ require.Equal(t, uint64(8), res.ObjectsSkipped())
+ require.Equal(t, uint64(0), res.ObjectsFailed())
+}
+
+func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) {
+ t.Skip()
+ e, ids, _ := newEngineEvacuate(t, 2, 0)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ cnrmap := make(map[cid.ID]*container.Container)
+ var cids []cid.ID
+ // Create containers with policy REP 2
+ for i := range 10_000 {
+ cnr1 := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(2)
+ p1.AddReplicas(x1)
+ cnr1.SetPlacementPolicy(p1)
+ cnr1.SetAttribute("i", strconv.Itoa(i))
+
+ var idCnr1 cid.ID
+ container.CalculateID(&idCnr1, cnr1)
+
+ cnrmap[idCnr1] = &cnr1
+ cids = append(cids, idCnr1)
+ }
+
+ e.SetContainerSource(&containerStorage{
+ cntmap: cnrmap,
+ latency: time.Millisecond * 100,
+ })
+
+ for _, cnt := range cids {
+ for range 1 {
+ obj := testutil.GenerateObjectWithCID(cnt)
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ _, err := e.shards[ids[0].String()].Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ }
+ }
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[0:1]
+ prm.Scope = EvacuateScopeObjects
+ prm.RepOneOnly = true
+ prm.ContainerWorkerCount = 10
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+
+ start := time.Now()
+ _, err := e.Evacuate(context.Background(), prm)
+ t.Logf("evacuate took %v\n", time.Since(start))
+ require.NoError(t, err)
+}
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index 5943be7f4..44f25246e 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -76,6 +76,12 @@ type IterateOverObjectsInContainerPrm struct {
Handler func(context.Context, *objectcore.Info) error
}
+// CountAliveObjectsInBucketPrm contains parameters for CountAliveObjectsInBucket operation.
+type CountAliveObjectsInBucketPrm struct {
+ // BucketName container's bucket name.
+ BucketName []byte
+}
+
// ListWithCursor lists physical objects available in metabase starting from
// cursor. Includes objects of all types. Does not include inhumed objects.
// Use cursor value from response for consecutive requests.
@@ -426,3 +432,48 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c
}
return nil
}
+
+// CountAliveObjectsInBucket counts objects in the bucket that aren't in the graveyard or garbage.
+func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("CountAliveObjectsInBucket", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.CountAliveObjectsInBucket")
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+
+ cidRaw := prm.BucketName[1:bucketKeySize]
+ if cidRaw == nil {
+ return 0, nil
+ }
+ var count uint64
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ bkt := tx.Bucket(prm.BucketName)
+ if bkt == nil {
+ return nil
+ }
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
+ c := bkt.Cursor()
+ k, _ := c.First()
+ for ; k != nil; k, _ = c.Next() {
+ if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
+ continue
+ }
+ count++
+ }
+ return nil
+ })
+ success = err == nil
+ return count, metaerr.Wrap(err)
+}
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index 9f56ec750..f5d633b77 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -44,10 +44,16 @@ type IterateOverContainersPrm struct {
type IterateOverObjectsInContainerPrm struct {
// BucketName container's bucket name.
BucketName []byte
- // Handler function executed upon containers in db.
+ // Handler function executed upon objects in db.
Handler func(context.Context, *objectcore.Info) error
}
+// CountAliveObjectsInBucketPrm contains parameters for CountAliveObjectsInBucket operation.
+type CountAliveObjectsInBucketPrm struct {
+ // BucketName container's bucket name.
+ BucketName []byte
+}
+
// ListWithCursorPrm contains parameters for ListWithCursor operation.
type ListWithCursorPrm struct {
count uint32
@@ -229,3 +235,25 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
return nil
}
+
+// CountAliveObjectsInBucket counts objects in the bucket that aren't in the graveyard or garbage.
+func (s *Shard) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInBucket")
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+
+ var metaPrm meta.CountAliveObjectsInBucketPrm
+ metaPrm.BucketName = prm.BucketName
+ count, err := s.metaBase.CountAliveObjectsInBucket(ctx, metaPrm)
+ if err != nil {
+ return 0, fmt.Errorf("could not count alive objects in bucket: %w", err)
+ }
+
+ return count, nil
+}
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
index bdc6f7c38..146ac7e16 100644
--- a/pkg/services/control/server/evacuate_async.go
+++ b/pkg/services/control/server/evacuate_async.go
@@ -31,6 +31,7 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha
Scope: engine.EvacuateScope(req.GetBody().GetScope()),
ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(),
ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(),
+ RepOneOnly: req.GetBody().GetRepOneOnly(),
}
_, err = s.s.Evacuate(ctx, prm)
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 88a06de22..ae1939e13 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -398,6 +398,8 @@ message StartShardEvacuationRequest {
uint32 container_worker_count = 4;
// Count of concurrent object evacuation workers.
uint32 object_worker_count = 5;
+ // Choose for evacuation objects in `REP 1` containers only.
+ bool rep_one_only = 6;
}
Body body = 1;
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index e92a8acd1..e16f082b1 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -6516,6 +6516,7 @@ type StartShardEvacuationRequest_Body struct {
Scope uint32 `json:"scope"`
ContainerWorkerCount uint32 `json:"containerWorkerCount"`
ObjectWorkerCount uint32 `json:"objectWorkerCount"`
+ RepOneOnly bool `json:"repOneOnly"`
}
var (
@@ -6537,6 +6538,7 @@ func (x *StartShardEvacuationRequest_Body) StableSize() (size int) {
size += proto.UInt32Size(3, x.Scope)
size += proto.UInt32Size(4, x.ContainerWorkerCount)
size += proto.UInt32Size(5, x.ObjectWorkerCount)
+ size += proto.BoolSize(6, x.RepOneOnly)
return size
}
@@ -6568,6 +6570,9 @@ func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMar
if x.ObjectWorkerCount != 0 {
mm.AppendUint32(5, x.ObjectWorkerCount)
}
+ if x.RepOneOnly {
+ mm.AppendBool(6, x.RepOneOnly)
+ }
}
// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
@@ -6609,6 +6614,12 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err er
return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount")
}
x.ObjectWorkerCount = data
+ case 6: // RepOneOnly
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "RepOneOnly")
+ }
+ x.RepOneOnly = data
}
}
return nil
@@ -6658,6 +6669,15 @@ func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 {
func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) {
x.ObjectWorkerCount = v
}
+func (x *StartShardEvacuationRequest_Body) GetRepOneOnly() bool {
+ if x != nil {
+ return x.RepOneOnly
+ }
+ return false
+}
+func (x *StartShardEvacuationRequest_Body) SetRepOneOnly(v bool) {
+ x.RepOneOnly = v
+}
// MarshalJSON implements the json.Marshaler interface.
func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
@@ -6703,6 +6723,11 @@ func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer)
out.RawString(prefix)
out.Uint32(x.ObjectWorkerCount)
}
+ {
+ const prefix string = ",\"repOneOnly\":"
+ out.RawString(prefix)
+ out.Bool(x.RepOneOnly)
+ }
out.RawByte('}')
}
@@ -6768,6 +6793,12 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
f = in.Uint32()
x.ObjectWorkerCount = f
}
+ case "repOneOnly":
+ {
+ var f bool
+ f = in.Bool()
+ x.RepOneOnly = f
+ }
}
in.WantComma()
}
From 7f8a1dcf8e238a08af84a1ef9e180541f783b71f Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Mon, 30 Sep 2024 14:15:13 +0300
Subject: [PATCH 160/705] [#1400] adm: Support flag `alphabet-wallets` for
commands `proxy-add/remove-account`
Signed-off-by: Anton Nikiforov
---
cmd/frostfs-adm/internal/modules/morph/proxy/root.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
index 082bc57d1..1854c8d2b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
@@ -30,11 +30,13 @@ var (
func initProxyAddAccount() {
AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
+ AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initProxyRemoveAccount() {
RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
+ RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func init() {
From a13219808a42f30839fe87ba3ea88a8fdd54f0ac Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Fri, 27 Sep 2024 12:39:43 +0300
Subject: [PATCH 161/705] [#1375] node: Configure of the container cache size
Signed-off-by: Alexander Chuprov
---
cmd/frostfs-node/cache.go | 8 ++--
cmd/frostfs-node/config.go | 2 +
cmd/frostfs-node/config/morph/config.go | 15 ++++++
cmd/frostfs-node/container.go | 63 +++++++++++++------------
cmd/frostfs-node/morph.go | 1 +
config/example/node.yaml | 1 +
6 files changed, 55 insertions(+), 35 deletions(-)
diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go
index 57f65d873..06142a46c 100644
--- a/cmd/frostfs-node/cache.go
+++ b/cmd/frostfs-node/cache.go
@@ -165,13 +165,11 @@ type ttlContainerStorage struct {
delInfoCache *ttlNetCache[cid.ID, *container.DelInfo]
}
-func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage {
- const containerCacheSize = 100
-
- lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
+func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
+ lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) {
return v.Get(id)
}, metrics.NewCacheMetrics("container"))
- lruDelInfoCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.DelInfo, error) {
+ lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) {
return v.DeletionInfo(id)
}, metrics.NewCacheMetrics("container_deletion_info"))
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index c625b575f..58a96879f 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -570,6 +570,8 @@ type cfgMorph struct {
// TTL of Sidechain cached values. Non-positive value disables caching.
cacheTTL time.Duration
+ containerCacheSize uint32
+
proxyScriptHash neogoutil.Uint160
}
diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go
index 1c536a0e2..d089870ea 100644
--- a/cmd/frostfs-node/config/morph/config.go
+++ b/cmd/frostfs-node/config/morph/config.go
@@ -30,6 +30,9 @@ const (
// FrostfsIDCacheSizeDefault is a default value of APE chain cache.
FrostfsIDCacheSizeDefault = 10_000
+
+ // ContainerCacheSizeDefault represents the default size for the container cache.
+ ContainerCacheSizeDefault = 100
)
var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section")
@@ -103,6 +106,18 @@ func CacheTTL(c *config.Config) time.Duration {
return CacheTTLDefault
}
+// ContainerCacheSize returns the value of "container_cache_size" config parameter
+// from "morph" section.
+//
+// Returns 0 if the value is not a positive integer.
+// Returns ContainerCacheSizeDefault if the value is missing.
+func ContainerCacheSize(c *config.Config) uint32 {
+ if c.Sub(subsection).Value("container_cache_size") == nil {
+ return ContainerCacheSizeDefault
+ }
+ return config.Uint32Safe(c.Sub(subsection), "container_cache_size")
+}
+
// SwitchInterval returns the value of "switch_interval" config parameter
// from "morph" section.
//
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index 6733140d2..729fcb8af 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -87,43 +87,46 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
cnrRdr.lister = client
} else {
// use RPC node as source of Container contract items (with caching)
- cachedContainerStorage := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL)
- cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL)
+ c.cfgObject.cnrSource = cnrSrc
+ if c.cfgMorph.containerCacheSize > 0 {
+ containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize)
- subscribeToContainerCreation(c, func(e event.Event) {
- ev := e.(containerEvent.PutSuccess)
+ subscribeToContainerCreation(c, func(e event.Event) {
+ ev := e.(containerEvent.PutSuccess)
- // read owner of the created container in order to update the reading cache.
- // TODO: use owner directly from the event after neofs-contract#256 will become resolved
- // but don't forget about the profit of reading the new container and caching it:
- // creation success are most commonly tracked by polling GET op.
- cnr, err := cnrSrc.Get(ev.ID)
- if err == nil {
- cachedContainerStorage.containerCache.set(ev.ID, cnr, nil)
- } else {
- // unlike removal, we expect successful receive of the container
- // after successful creation, so logging can be useful
- c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
+ // read owner of the created container in order to update the reading cache.
+ // TODO: use owner directly from the event after neofs-contract#256 will become resolved
+ // but don't forget about the profit of reading the new container and caching it:
+ // creation success are most commonly tracked by polling GET op.
+ cnr, err := cnrSrc.Get(ev.ID)
+ if err == nil {
+ containerCache.containerCache.set(ev.ID, cnr, nil)
+ } else {
+ // unlike removal, we expect successful receive of the container
+ // after successful creation, so logging can be useful
+ c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
+ zap.Stringer("id", ev.ID),
+ zap.Error(err),
+ )
+ }
+
+ c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
zap.Stringer("id", ev.ID),
- zap.Error(err),
)
- }
+ })
- c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
- zap.Stringer("id", ev.ID),
- )
- })
-
- subscribeToContainerRemoval(c, func(e event.Event) {
- ev := e.(containerEvent.DeleteSuccess)
- cachedContainerStorage.handleRemoval(ev.ID)
- c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
- zap.Stringer("id", ev.ID),
- )
- })
+ subscribeToContainerRemoval(c, func(e event.Event) {
+ ev := e.(containerEvent.DeleteSuccess)
+ containerCache.handleRemoval(ev.ID)
+ c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
+ zap.Stringer("id", ev.ID),
+ )
+ })
+ c.cfgObject.cnrSource = containerCache
+ }
+ cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL)
c.cfgObject.eaclSource = cachedEACLStorage
- c.cfgObject.cnrSource = cachedContainerStorage
cnrRdr.lister = client
cnrRdr.eacl = c.cfgObject.eaclSource
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 7178cd97d..1bfcb8ac9 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -90,6 +90,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
var netmapSource netmap.Source
+ c.cfgMorph.containerCacheSize = morphconfig.ContainerCacheSize(c.appCfg)
c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg)
if c.cfgMorph.cacheTTL == 0 {
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 86be35ba8..2a80fba18 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -81,6 +81,7 @@ morph:
cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching.
# Default value: block time. It is recommended to have this value less or equal to block time.
# Cached entities: containers, container lists, eACL tables.
+ container_cache_size: 100 # container_cache_size is the maximum number of containers in the cache.
switch_interval: 3m # interval b/w RPC switch attempts if the node is connected not to the highest priority node
rpc_endpoint: # side chain NEO RPC endpoints; are shuffled and used one by one until the first success
- address: wss://rpc1.morph.frostfs.info:40341/ws
From 54eb0058229965b7ddd704fe4da2e24f41c20f3f Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 1 Oct 2024 14:39:36 +0300
Subject: [PATCH 162/705] [#1404] go.mod: Update api-go
Fix #1398
Fix #1399
Signed-off-by: Evgenii Stratonikov
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 9817f8527..1023948bc 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
diff --git a/go.sum b/go.sum
index 3c6dd9a99..5d719a027 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e h1:740ABnOBYx4o6jxULHdSSnVW2fYIO35ohg+Uz59sxd0=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f h1:FZvX6CLzTQqMyMvOerIKMvIEJQbOImDjSooZx3AVRyE=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
From 9c5ddc4dfeb6447ae7d9cc0d74db551271ac6eb1 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Wed, 2 Oct 2024 10:09:10 +0300
Subject: [PATCH 163/705] [#1407] tree: Set `ContainerOwner` in parameter for
`CheckAPE`
Signed-off-by: Airat Arifullin
---
pkg/services/tree/ape.go | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
index 693b16e60..69cf59405 100644
--- a/pkg/services/tree/ape.go
+++ b/pkg/services/tree/ape.go
@@ -75,12 +75,13 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
}
return s.apeChecker.CheckAPE(checkercore.CheckPrm{
- Request: request,
- Namespace: namespace,
- Container: cid,
- PublicKey: publicKey,
- BearerToken: bt,
- SoftAPECheck: false,
+ Request: request,
+ Namespace: namespace,
+ Container: cid,
+ ContainerOwner: container.Value.Owner(),
+ PublicKey: publicKey,
+ BearerToken: bt,
+ SoftAPECheck: false,
})
}
From 57c31e9802ad19b8d64388315cd53a05515e353e Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Tue, 1 Oct 2024 16:09:05 +0300
Subject: [PATCH 164/705] [#1306] node: Allow tombstone_lifetime config to be
loaded on the fly
Signed-off-by: Ekaterina Lebedeva
---
cmd/frostfs-node/config.go | 17 +++++++++++++++--
cmd/frostfs-node/object.go | 6 ++----
2 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 58a96879f..4ad9ec6c6 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -105,6 +105,10 @@ type applicationConfiguration struct {
timestamp bool
}
+ ObjectCfg struct {
+ tombstoneLifetime uint64
+ }
+
EngineCfg struct {
errorThreshold uint32
shardPoolSize uint32
@@ -223,6 +227,10 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.LoggerCfg.destination = loggerconfig.Destination(c)
a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
+ // Object
+
+ a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c)
+
// Storage Engine
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
@@ -624,7 +632,7 @@ type cfgObject struct {
cfgLocalStorage cfgLocalStorage
- tombstoneLifetime uint64
+ tombstoneLifetime *atomic.Uint64
skipSessionTokenIssuerVerification bool
}
@@ -815,9 +823,11 @@ func initCfgGRPC() cfgGRPC {
}
func initCfgObject(appCfg *config.Config) cfgObject {
+ var tsLifetime atomic.Uint64
+ tsLifetime.Store(objectconfig.TombstoneLifetime(appCfg))
return cfgObject{
pool: initObjectPool(appCfg),
- tombstoneLifetime: objectconfig.TombstoneLifetime(appCfg),
+ tombstoneLifetime: &tsLifetime,
skipSessionTokenIssuerVerification: objectconfig.Put(appCfg).SkipSessionTokenIssuerVerification(),
}
}
@@ -1296,6 +1306,9 @@ func (c *cfg) reloadConfig(ctx context.Context) {
components := c.getComponents(ctx, logPrm)
+ // Object
+ c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
+
// Storage Engine
var rcfg engine.ReConfiguration
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 9d4e35ca8..47649c88b 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -109,13 +109,12 @@ func (s *objectSvc) GetRangeHash(ctx context.Context, req *object.GetRangeHashRe
type delNetInfo struct {
netmap.State
- tsLifetime uint64
cfg *cfg
}
func (i *delNetInfo) TombstoneLifetime() (uint64, error) {
- return i.tsLifetime, nil
+ return i.cfg.cfgObject.tombstoneLifetime.Load(), nil
}
// returns node owner ID calculated from configured private key.
@@ -424,8 +423,7 @@ func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Servi
sSearch,
sPut,
&delNetInfo{
- State: c.cfgNetmap.state,
- tsLifetime: c.cfgObject.tombstoneLifetime,
+ State: c.cfgNetmap.state,
cfg: c,
},
From f45e75e3eb781662abbbc4f6820076c492fc1f0d Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 2 Oct 2024 11:18:35 +0300
Subject: [PATCH 165/705] [#1409] adm: Do not bind DeltaFlag to viper
We bind flag that could be specified in config.
This is not a config flag, just a command option.
Also fix TestInitialize failures:
```
Error: Received unexpected error:
number of epochs cannot be less than 1
Test: TestInitialize/16_nodes/force-new-epoch
```
Refs #1372 (945b7c740b0deb4)
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go | 3 ++-
cmd/frostfs-adm/internal/modules/morph/netmap/root.go | 1 -
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
index 5e4e9c725..5c5fa9988 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
@@ -31,7 +31,8 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
}
bw := io.NewBufBinWriter()
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, viper.GetInt64(commonflags.DeltaFlag)); err != nil {
+ delta, _ := cmd.Flags().GetInt64(commonflags.DeltaFlag)
+ if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, delta); err != nil {
return err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
index 0288bcdc5..3300db36a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
@@ -22,7 +22,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.DeltaFlag, cmd.Flags().Lookup(commonflags.DeltaFlag))
},
RunE: ForceNewEpochCmd,
}
From 62028cd7ee0b5d825b71cfa11d1d87369b1da23d Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 2 Oct 2024 11:20:09 +0300
Subject: [PATCH 166/705] [#1409] adm: Uncommonize DeltaFlag
It is used only in `force-new-epoch`, it is not _common_ between
multiple commands.
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-adm/internal/commonflags/flags.go | 1 -
cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go | 5 +++--
cmd/frostfs-adm/internal/modules/morph/netmap/root.go | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go
index b51d2e115..81395edb0 100644
--- a/cmd/frostfs-adm/internal/commonflags/flags.go
+++ b/cmd/frostfs-adm/internal/commonflags/flags.go
@@ -39,5 +39,4 @@ const (
CustomZoneFlag = "domain"
AlphabetSizeFlag = "size"
AllFlag = "all"
- DeltaFlag = "delta"
)
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
index 5c5fa9988..94223dbd0 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
@@ -4,7 +4,6 @@ import (
"fmt"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -13,6 +12,8 @@ import (
"github.com/spf13/viper"
)
+const deltaFlag = "delta"
+
func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
@@ -31,7 +32,7 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
}
bw := io.NewBufBinWriter()
- delta, _ := cmd.Flags().GetInt64(commonflags.DeltaFlag)
+ delta, _ := cmd.Flags().GetInt64(deltaFlag)
if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, delta); err != nil {
return err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
index 3300db36a..55b7e64f0 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
@@ -35,7 +35,7 @@ func initForceNewEpochCmd() {
ForceNewEpoch.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
ForceNewEpoch.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
ForceNewEpoch.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
- ForceNewEpoch.Flags().Int64(commonflags.DeltaFlag, 1, "Number of epochs to increase the current epoch")
+ ForceNewEpoch.Flags().Int64(deltaFlag, 1, "Number of epochs to increase the current epoch")
}
func init() {
From f83f7feb8caa0ef5ab9a952a6a6d3e2f12a63486 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 2 Oct 2024 11:01:22 +0300
Subject: [PATCH 167/705] [#1391] adm: Properly check whether transfers were
made
Signed-off-by: Evgenii Stratonikov
---
.../morph/initialize/initialize_transfer.go | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
index d7b0ec86c..7f1bfee2b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
@@ -27,12 +27,12 @@ const (
initialAlphabetGASAmount = 10_000 * native.GASFactor
// initialProxyGASAmount represents the amount of GAS given to a proxy contract.
initialProxyGASAmount = 50_000 * native.GASFactor
- // alphabetGasRatio is a coefficient that defines the threshold below which
- // the balance of the alphabet node is considered not replenished. The value
- // of this coefficient is determined empirically.
- alphabetGasRatio = 5
)
+func initialCommitteeGASAmount(c *helper.InitializeContext) int64 {
+ return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
+}
+
func transferFunds(c *helper.InitializeContext) error {
ok, err := transferFundsFinished(c)
if ok || err != nil {
@@ -59,7 +59,7 @@ func transferFunds(c *helper.InitializeContext) error {
transferTarget{
Token: gas.Hash,
Address: c.CommitteeAcc.Contract.ScriptHash(),
- Amount: (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2,
+ Amount: initialCommitteeGASAmount(c),
},
transferTarget{
Token: neo.Hash,
@@ -80,12 +80,19 @@ func transferFunds(c *helper.InitializeContext) error {
return c.AwaitTx()
}
+// transferFundsFinished checks balances of accounts we transfer GAS to.
+// The stage is considered finished if the balance is greater than the half of what we need to transfer.
func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
acc := c.Accounts[0]
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
res, err := r.BalanceOf(acc.Contract.ScriptHash())
- return res.Cmp(big.NewInt(alphabetGasRatio*native.GASFactor)) == 1, err
+ if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 {
+ return false, err
+ }
+
+ res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
+ return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err
}
func transferGASToProxy(c *helper.InitializeContext) error {
From 434048e8d959b29375c0d63a112b8eb8df8792d8 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 2 Oct 2024 11:28:00 +0300
Subject: [PATCH 168/705] [#1408] metabase: Fix EC search with slow and fast
filters
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/select.go | 35 ++++++++++++-
.../metabase/select_test.go | 50 +++++++++++++++++++
2 files changed, 83 insertions(+), 2 deletions(-)
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index ed43fc41f..85d1b08ba 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -389,8 +389,7 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
return result, true
}
- buf := make([]byte, addressKeySize)
- obj, err := db.get(tx, addr, buf, true, false, currEpoch)
+ obj, isECChunk, err := db.getObjectForSlowFilters(tx, addr, currEpoch)
if err != nil {
return result, false
}
@@ -401,17 +400,26 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
case v2object.FilterHeaderVersion:
data = []byte(obj.Version().String())
case v2object.FilterHeaderHomomorphicHash:
+ if isECChunk {
+ return result, false // EC chunk and EC parent hashes are incomparable
+ }
cs, _ := obj.PayloadHomomorphicHash()
data = cs.Value()
case v2object.FilterHeaderCreationEpoch:
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.CreationEpoch())
case v2object.FilterHeaderPayloadLength:
+ if isECChunk {
+ return result, false // EC chunk and EC parent payload lengths are incomparable
+ }
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.PayloadSize())
case v2object.FilterHeaderOwnerID:
data = []byte(obj.OwnerID().EncodeToString())
case v2object.FilterHeaderPayloadHash:
+ if isECChunk {
+ return result, false // EC chunk and EC parent payload hashes are incomparable
+ }
cs, _ := obj.PayloadChecksum()
data = cs.Value()
default: // user attribute
@@ -439,6 +447,29 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
return result, true
}
+func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
+ buf := make([]byte, addressKeySize)
+ obj, err := db.get(tx, addr, buf, true, false, currEpoch)
+ if err != nil {
+ var ecInfoError *objectSDK.ECInfoError
+ if errors.As(err, &ecInfoError) {
+ for _, chunk := range ecInfoError.ECInfo().Chunks {
+ var objID oid.ID
+ if err = objID.ReadFromV2(chunk.ID); err != nil {
+ continue
+ }
+ addr.SetObject(objID)
+ obj, err = db.get(tx, addr, buf, true, false, currEpoch)
+ if err == nil {
+ return obj, true, nil
+ }
+ }
+ }
+ return nil, false, err
+ }
+ return obj, false, nil
+}
+
func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
objectAttributes := obj.Attributes()
if ech := obj.ECHeader(); ech != nil {
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index fcd5d3a90..0c6ebc863 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -762,6 +762,56 @@ func TestDB_SelectOwnerID(t *testing.T) {
})
}
+func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) {
+ t.Parallel()
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close()) }()
+
+ cnr := cidtest.ID()
+ ecChunk1 := oidtest.ID()
+ ecChunk2 := oidtest.ID()
+ ecParent := oidtest.ID()
+ var ecParentAddr oid.Address
+ ecParentAddr.SetContainer(cnr)
+ ecParentAddr.SetObject(ecParent)
+ var ecParentAttr []objectSDK.Attribute
+ var attr objectSDK.Attribute
+ attr.SetKey(objectSDK.AttributeFilePath)
+ attr.SetValue("/1/2/3")
+ ecParentAttr = append(ecParentAttr, attr)
+
+ chunkObj := testutil.GenerateObjectWithCID(cnr)
+ chunkObj.SetContainerID(cnr)
+ chunkObj.SetID(ecChunk1)
+ chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
+ chunkObj.SetPayloadSize(uint64(5))
+ chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0))
+
+ chunkObj2 := testutil.GenerateObjectWithCID(cnr)
+ chunkObj2.SetContainerID(cnr)
+ chunkObj2.SetID(ecChunk2)
+ chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+ chunkObj2.SetPayloadSize(uint64(10))
+ chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 1, 3, []byte{}, 0))
+
+ // put object with EC
+
+ var prm meta.PutPrm
+ prm.SetObject(chunkObj)
+ _, err := db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ prm.SetObject(chunkObj2)
+ _, err = db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ fs := objectSDK.SearchFilters{}
+ fs.AddRootFilter()
+ fs.AddFilter(objectSDK.AttributeFilePath, "/1/2/3", objectSDK.MatchCommonPrefix)
+ testSelect(t, db, cnr, fs, ecParentAddr)
+}
+
type testTarget struct {
objects []*objectSDK.Object
}
From 01e3944b31e7daed8ca855244b833302daabe9cc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 2 Oct 2024 14:36:10 +0300
Subject: [PATCH 169/705] [#1408] metabase: Fix tests
No need to specify container ID for objects created with `testutil.GenerateObjectWithCID`.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/delete_ec_test.go | 1 -
pkg/local_object_storage/metabase/inhume_ec_test.go | 2 --
pkg/local_object_storage/metabase/select_test.go | 2 --
3 files changed, 5 deletions(-)
diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go
index 66c79ecd7..a25627990 100644
--- a/pkg/local_object_storage/metabase/delete_ec_test.go
+++ b/pkg/local_object_storage/metabase/delete_ec_test.go
@@ -39,7 +39,6 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
tombstoneID := oidtest.ID()
chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetContainerID(cnr)
chunkObj.SetID(ecChunk)
chunkObj.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
chunkObj.SetPayloadSize(uint64(10))
diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go
index c3b1e72da..32e412c79 100644
--- a/pkg/local_object_storage/metabase/inhume_ec_test.go
+++ b/pkg/local_object_storage/metabase/inhume_ec_test.go
@@ -35,14 +35,12 @@ func TestInhumeECObject(t *testing.T) {
tombstoneID := oidtest.ID()
chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetContainerID(cnr)
chunkObj.SetID(ecChunk)
chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
chunkObj.SetPayloadSize(uint64(5))
chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0))
chunkObj2 := testutil.GenerateObjectWithCID(cnr)
- chunkObj2.SetContainerID(cnr)
chunkObj2.SetID(ecChunk2)
chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
chunkObj2.SetPayloadSize(uint64(10))
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 0c6ebc863..bee778e2b 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -782,14 +782,12 @@ func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) {
ecParentAttr = append(ecParentAttr, attr)
chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetContainerID(cnr)
chunkObj.SetID(ecChunk1)
chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
chunkObj.SetPayloadSize(uint64(5))
chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0))
chunkObj2 := testutil.GenerateObjectWithCID(cnr)
- chunkObj2.SetContainerID(cnr)
chunkObj2.SetID(ecChunk2)
chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
chunkObj2.SetPayloadSize(uint64(10))
From 6c46044c9cba5f2e20e105b3efa7abe166fbf577 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 3 Oct 2024 10:19:26 +0300
Subject: [PATCH 170/705] [#1410] shard: Move MetricsWriter interface to a
separate file
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/shard/metrics.go | 47 +++++++++++++++++++++++
pkg/local_object_storage/shard/shard.go | 44 ---------------------
2 files changed, 47 insertions(+), 44 deletions(-)
create mode 100644 pkg/local_object_storage/shard/metrics.go
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
new file mode 100644
index 000000000..568c0de5e
--- /dev/null
+++ b/pkg/local_object_storage/shard/metrics.go
@@ -0,0 +1,47 @@
+package shard
+
+import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+
+// MetricsWriter is an interface that must store shard's metrics.
+type MetricsWriter interface {
+ // SetObjectCounter must set object counter taking into account object type.
+ SetObjectCounter(objectType string, v uint64)
+ // AddToObjectCounter must update object counter taking into account object
+ // type.
+ // Negative parameter must decrease the counter.
+ AddToObjectCounter(objectType string, delta int)
+ // AddToContainerSize must add a value to the container size.
+ // Value can be negative.
+ AddToContainerSize(cnr string, value int64)
+ // AddToPayloadSize must add a value to the payload size.
+ // Value can be negative.
+ AddToPayloadSize(value int64)
+ // IncObjectCounter must increment shard's object counter taking into account
+ // object type.
+ IncObjectCounter(objectType string)
+ // SetShardID must set (update) the shard identifier that will be used in
+ // metrics.
+ SetShardID(id string)
+ // SetReadonly must set shard mode.
+ SetMode(mode mode.Mode)
+ // IncErrorCounter increment error counter.
+ IncErrorCounter()
+ // ClearErrorCounter clear error counter.
+ ClearErrorCounter()
+ // DeleteShardMetrics deletes shard metrics from registry.
+ DeleteShardMetrics()
+ // SetContainerObjectsCount sets container object count.
+ SetContainerObjectsCount(cnrID string, objectType string, value uint64)
+ // IncContainerObjectsCount increments container object count.
+ IncContainerObjectsCount(cnrID string, objectType string)
+ // SubContainerObjectsCount subtracts container object count.
+ SubContainerObjectsCount(cnrID string, objectType string, value uint64)
+ // IncRefillObjectsCount increments refill objects count.
+ IncRefillObjectsCount(path string, size int, success bool)
+ // SetRefillPercent sets refill percent.
+ SetRefillPercent(path string, percent uint32)
+ // SetRefillStatus sets refill status.
+ SetRefillStatus(path string, status string)
+ // SetEvacuationInProgress sets evacuation status
+ SetEvacuationInProgress(value bool)
+}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 7496fc352..f5317b16c 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -57,50 +57,6 @@ type DeletedLockCallback func(context.Context, []oid.Address)
// EmptyContainersCallback is a callback hanfling list of zero-size and zero-count containers.
type EmptyContainersCallback func(context.Context, []cid.ID)
-// MetricsWriter is an interface that must store shard's metrics.
-type MetricsWriter interface {
- // SetObjectCounter must set object counter taking into account object type.
- SetObjectCounter(objectType string, v uint64)
- // AddToObjectCounter must update object counter taking into account object
- // type.
- // Negative parameter must decrease the counter.
- AddToObjectCounter(objectType string, delta int)
- // AddToContainerSize must add a value to the container size.
- // Value can be negative.
- AddToContainerSize(cnr string, value int64)
- // AddToPayloadSize must add a value to the payload size.
- // Value can be negative.
- AddToPayloadSize(value int64)
- // IncObjectCounter must increment shard's object counter taking into account
- // object type.
- IncObjectCounter(objectType string)
- // SetShardID must set (update) the shard identifier that will be used in
- // metrics.
- SetShardID(id string)
- // SetReadonly must set shard mode.
- SetMode(mode mode.Mode)
- // IncErrorCounter increment error counter.
- IncErrorCounter()
- // ClearErrorCounter clear error counter.
- ClearErrorCounter()
- // DeleteShardMetrics deletes shard metrics from registry.
- DeleteShardMetrics()
- // SetContainerObjectsCount sets container object count.
- SetContainerObjectsCount(cnrID string, objectType string, value uint64)
- // IncContainerObjectsCount increments container object count.
- IncContainerObjectsCount(cnrID string, objectType string)
- // SubContainerObjectsCount subtracts container object count.
- SubContainerObjectsCount(cnrID string, objectType string, value uint64)
- // IncRefillObjectsCount increments refill objects count.
- IncRefillObjectsCount(path string, size int, success bool)
- // SetRefillPercent sets refill percent.
- SetRefillPercent(path string, percent uint32)
- // SetRefillStatus sets refill status.
- SetRefillStatus(path string, status string)
- // SetEvacuationInProgress sets evacuation status
- SetEvacuationInProgress(value bool)
-}
-
type cfg struct {
m sync.RWMutex
From 9206ce5cd2ea973feef6a53ae0453736efacbe11 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 3 Oct 2024 10:23:59 +0300
Subject: [PATCH 171/705] [#1410] shard: Provide the default implementation for
MetricsWriter
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/shard/id.go | 4 +-
pkg/local_object_storage/shard/metrics.go | 22 ++++++++++
pkg/local_object_storage/shard/mode.go | 4 +-
pkg/local_object_storage/shard/shard.go | 49 +++++++++--------------
4 files changed, 42 insertions(+), 37 deletions(-)
diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go
index 2fe68d270..a72313498 100644
--- a/pkg/local_object_storage/shard/id.go
+++ b/pkg/local_object_storage/shard/id.go
@@ -45,9 +45,7 @@ func (s *Shard) UpdateID() (err error) {
}
shardID := s.info.ID.String()
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.SetShardID(shardID)
- }
+ s.cfg.metricsWriter.SetShardID(shardID)
if s.writeCache != nil && s.writeCache.GetMetrics() != nil {
s.writeCache.GetMetrics().SetShardID(shardID)
}
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
index 568c0de5e..6bf198048 100644
--- a/pkg/local_object_storage/shard/metrics.go
+++ b/pkg/local_object_storage/shard/metrics.go
@@ -45,3 +45,25 @@ type MetricsWriter interface {
// SetEvacuationInProgress sets evacuation status
SetEvacuationInProgress(value bool)
}
+
+type noopMetrics struct{}
+
+var _ MetricsWriter = noopMetrics{}
+
+func (noopMetrics) SetObjectCounter(string, uint64) {}
+func (noopMetrics) AddToObjectCounter(string, int) {}
+func (noopMetrics) AddToContainerSize(string, int64) {}
+func (noopMetrics) AddToPayloadSize(int64) {}
+func (noopMetrics) IncObjectCounter(string) {}
+func (noopMetrics) SetShardID(string) {}
+func (noopMetrics) SetMode(mode.Mode) {}
+func (noopMetrics) IncErrorCounter() {}
+func (noopMetrics) ClearErrorCounter() {}
+func (noopMetrics) DeleteShardMetrics() {}
+func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {}
+func (noopMetrics) IncContainerObjectsCount(string, string) {}
+func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {}
+func (noopMetrics) IncRefillObjectsCount(string, int, bool) {}
+func (noopMetrics) SetRefillPercent(string, uint32) {}
+func (noopMetrics) SetRefillStatus(string, string) {}
+func (noopMetrics) SetEvacuationInProgress(bool) {}
diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go
index 1bab57448..d90a5f4b6 100644
--- a/pkg/local_object_storage/shard/mode.go
+++ b/pkg/local_object_storage/shard/mode.go
@@ -65,9 +65,7 @@ func (s *Shard) setMode(m mode.Mode) error {
}
s.info.Mode = m
- if s.metricsWriter != nil {
- s.metricsWriter.SetMode(s.info.Mode)
- }
+ s.metricsWriter.SetMode(s.info.Mode)
s.log.Info(logs.ShardShardModeSetSuccessfully,
zap.Stringer("mode", s.info.Mode))
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index f5317b16c..a57b548be 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -105,6 +105,7 @@ func defaultCfg() *cfg {
reportErrorFunc: func(string, string, error) {},
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
+ metricsWriter: noopMetrics{},
}
}
@@ -384,7 +385,7 @@ const (
)
func (s *Shard) updateMetrics(ctx context.Context) {
- if s.cfg.metricsWriter == nil || s.GetMode().NoMetabase() {
+ if s.GetMode().NoMetabase() {
return
}
@@ -439,35 +440,29 @@ func (s *Shard) updateMetrics(ctx context.Context) {
// incObjectCounter increment both physical and logical object
// counters.
func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.IncObjectCounter(physical)
- s.cfg.metricsWriter.IncObjectCounter(logical)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
- if isUser {
- s.cfg.metricsWriter.IncObjectCounter(user)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
- }
+ s.cfg.metricsWriter.IncObjectCounter(physical)
+ s.cfg.metricsWriter.IncObjectCounter(logical)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
+ if isUser {
+ s.cfg.metricsWriter.IncObjectCounter(user)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
}
}
func (s *Shard) decObjectCounterBy(typ string, v uint64) {
- if s.cfg.metricsWriter != nil && v > 0 {
+ if v > 0 {
s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v))
}
}
func (s *Shard) setObjectCounterBy(typ string, v uint64) {
- if s.cfg.metricsWriter != nil && v > 0 {
+ if v > 0 {
s.cfg.metricsWriter.SetObjectCounter(typ, v)
}
}
func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
- if s.cfg.metricsWriter == nil {
- return
- }
-
for cnrID, count := range byCnr {
if count.Phy > 0 {
s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
@@ -482,46 +477,38 @@ func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters)
}
func (s *Shard) addToContainerSize(cnr string, size int64) {
- if s.cfg.metricsWriter != nil && size != 0 {
+ if size != 0 {
s.cfg.metricsWriter.AddToContainerSize(cnr, size)
}
}
func (s *Shard) addToPayloadSize(size int64) {
- if s.cfg.metricsWriter != nil && size != 0 {
+ if size != 0 {
s.cfg.metricsWriter.AddToPayloadSize(size)
}
}
func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) {
- if s.cfg.metricsWriter != nil && v > 0 {
+ if v > 0 {
s.metricsWriter.SetContainerObjectsCount(cnr, typ, v)
}
}
func (s *Shard) IncErrorCounter() {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.IncErrorCounter()
- }
+ s.cfg.metricsWriter.IncErrorCounter()
}
func (s *Shard) ClearErrorCounter() {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.ClearErrorCounter()
- }
+ s.cfg.metricsWriter.ClearErrorCounter()
}
func (s *Shard) DeleteShardMetrics() {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.DeleteShardMetrics()
- }
+ s.cfg.metricsWriter.DeleteShardMetrics()
}
func (s *Shard) SetEvacuationInProgress(val bool) {
s.m.Lock()
defer s.m.Unlock()
s.info.EvacuationInProgress = val
- if s.metricsWriter != nil {
- s.metricsWriter.SetEvacuationInProgress(val)
- }
+ s.metricsWriter.SetEvacuationInProgress(val)
}
From 9a87acb87ad243fcdd932e764a3f5f8d9c5c6657 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 3 Oct 2024 10:40:56 +0300
Subject: [PATCH 172/705] [#1410] engine: Provide the default implementation to
MetricsRegister
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/engine/container.go | 8 +---
pkg/local_object_storage/engine/delete.go | 4 +-
pkg/local_object_storage/engine/engine.go | 1 +
pkg/local_object_storage/engine/get.go | 4 +-
pkg/local_object_storage/engine/head.go | 4 +-
pkg/local_object_storage/engine/inhume.go | 4 +-
pkg/local_object_storage/engine/metrics.go | 45 +++++++++++++++++++
pkg/local_object_storage/engine/put.go | 4 +-
pkg/local_object_storage/engine/range.go | 4 +-
pkg/local_object_storage/engine/select.go | 8 +---
pkg/local_object_storage/engine/shards.go | 46 +++++++++-----------
11 files changed, 77 insertions(+), 55 deletions(-)
diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go
index e45f502ac..6def02f12 100644
--- a/pkg/local_object_storage/engine/container.go
+++ b/pkg/local_object_storage/engine/container.go
@@ -68,9 +68,7 @@ func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) {
}
func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) {
- if e.metrics != nil {
- defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)()
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
var csPrm shard.ContainerSizePrm
@@ -116,9 +114,7 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) {
}
func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) {
- if e.metrics != nil {
- defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
uniqueIDs := make(map[string]cid.ID)
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 318f938fb..61cb6832d 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -68,9 +68,7 @@ func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRe
}
func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
- if e.metrics != nil {
- defer elapsed("Delete", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Delete", e.metrics.AddMethodDuration)()
var locked struct {
is bool
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index 5e883a641..13efdcb84 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -219,6 +219,7 @@ func defaultCfg() *cfg {
res := &cfg{
log: &logger.Logger{Logger: zap.L()},
shardPoolSize: 20,
+ metrics: noopMetrics{},
}
res.containerSource.Store(&containerSource{})
return res
diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go
index 253256c34..4a9199be7 100644
--- a/pkg/local_object_storage/engine/get.go
+++ b/pkg/local_object_storage/engine/get.go
@@ -66,9 +66,7 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er
}
func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
- if e.metrics != nil {
- defer elapsed("Get", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Get", e.metrics.AddMethodDuration)()
errNotFound := new(apistatus.ObjectNotFound)
diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go
index 6857a3631..d2e3cfd99 100644
--- a/pkg/local_object_storage/engine/head.go
+++ b/pkg/local_object_storage/engine/head.go
@@ -68,9 +68,7 @@ func (e *StorageEngine) Head(ctx context.Context, prm HeadPrm) (res HeadRes, err
func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.head")
defer span.End()
- if e.metrics != nil {
- defer elapsed("Head", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Head", e.metrics.AddMethodDuration)()
var (
head *objectSDK.Object
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index 683713f94..35ce50f65 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -80,9 +80,7 @@ func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRe
}
func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
- if e.metrics != nil {
- defer elapsed("Inhume", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Inhume", e.metrics.AddMethodDuration)()
var shPrm shard.InhumePrm
if prm.forceRemoval {
diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go
index 1c088c754..75936206d 100644
--- a/pkg/local_object_storage/engine/metrics.go
+++ b/pkg/local_object_storage/engine/metrics.go
@@ -68,3 +68,48 @@ func (m *gcMetrics) AddExpiredObjectCollectionDuration(d time.Duration, success
func (m *gcMetrics) AddInhumedObjectCount(count uint64, objectType string) {
m.storage.AddInhumedObjectCount(m.shardID, count, objectType)
}
+
+type (
+ noopMetrics struct{}
+ noopWriteCacheMetrics struct{}
+ noopGCMetrics struct{}
+)
+
+var (
+ _ MetricRegister = noopMetrics{}
+ _ metrics.WriteCacheMetrics = noopWriteCacheMetrics{}
+ _ metrics.GCMetrics = noopGCMetrics{}
+)
+
+func (noopMetrics) AddMethodDuration(string, time.Duration) {}
+func (noopMetrics) SetObjectCounter(string, string, uint64) {}
+func (noopMetrics) AddToObjectCounter(string, string, int) {}
+func (noopMetrics) SetMode(string, mode.Mode) {}
+func (noopMetrics) AddToContainerSize(string, int64) {}
+func (noopMetrics) DeleteContainerSize(string) {}
+func (noopMetrics) DeleteContainerCount(string) {}
+func (noopMetrics) AddToPayloadCounter(string, int64) {}
+func (noopMetrics) IncErrorCounter(string) {}
+func (noopMetrics) ClearErrorCounter(string) {}
+func (noopMetrics) DeleteShardMetrics(string) {}
+func (noopMetrics) SetContainerObjectCounter(string, string, string, uint64) {}
+func (noopMetrics) IncContainerObjectCounter(string, string, string) {}
+func (noopMetrics) SubContainerObjectCounter(string, string, string, uint64) {}
+func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {}
+func (noopMetrics) SetRefillPercent(string, string, uint32) {}
+func (noopMetrics) SetRefillStatus(string, string, string) {}
+func (noopMetrics) SetEvacuationInProgress(string, bool) {}
+func (noopMetrics) WriteCache() metrics.WriteCacheMetrics { return noopWriteCacheMetrics{} }
+func (noopMetrics) GC() metrics.GCMetrics { return noopGCMetrics{} }
+
+func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {}
+func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {}
+func (noopWriteCacheMetrics) SetEstimateSize(string, string, string, uint64) {}
+func (noopWriteCacheMetrics) SetMode(string, string) {}
+func (noopWriteCacheMetrics) IncOperationCounter(string, string, string, string, metrics.NullBool) {}
+func (noopWriteCacheMetrics) Close(string, string) {}
+
+func (noopGCMetrics) AddRunDuration(string, time.Duration, bool) {}
+func (noopGCMetrics) AddDeletedCount(string, uint64, uint64) {}
+func (noopGCMetrics) AddExpiredObjectCollectionDuration(string, time.Duration, bool, string) {}
+func (noopGCMetrics) AddInhumedObjectCount(string, uint64, string) {}
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index f92d83745..bf86402a7 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -72,9 +72,7 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
}
func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
- if e.metrics != nil {
- defer elapsed("Put", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Put", e.metrics.AddMethodDuration)()
addr := object.AddressOf(prm.obj)
diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go
index cbf26ff4e..498674fd2 100644
--- a/pkg/local_object_storage/engine/range.go
+++ b/pkg/local_object_storage/engine/range.go
@@ -82,9 +82,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
))
defer span.End()
- if e.metrics != nil {
- defer elapsed("GetRange", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("GetRange", e.metrics.AddMethodDuration)()
var shPrm shard.RngPrm
shPrm.SetAddress(prm.addr)
diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go
index 6a8c9fab9..972a4f52a 100644
--- a/pkg/local_object_storage/engine/select.go
+++ b/pkg/local_object_storage/engine/select.go
@@ -59,9 +59,7 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe
}
func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
- if e.metrics != nil {
- defer elapsed("Search", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Search", e.metrics.AddMethodDuration)()
addrList := make([]oid.Address, 0)
uniqueMap := make(map[string]struct{})
@@ -108,9 +106,7 @@ func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes,
}
func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) {
- if e.metrics != nil {
- defer elapsed("ListObjects", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("ListObjects", e.metrics.AddMethodDuration)()
addrList := make([]oid.Address, 0, limit)
uniqueMap := make(map[string]struct{})
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 2ad6859e4..96f54369b 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -116,9 +116,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh
return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err)
}
- if e.cfg.metrics != nil {
- e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
- }
+ e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
return sh.ID(), nil
}
@@ -152,28 +150,26 @@ func (e *StorageEngine) appendMetrics(id *shard.ID, opts []shard.Option) []shard
e.mtx.RLock()
defer e.mtx.RUnlock()
- if e.metrics != nil {
- opts = append(opts,
- shard.WithMetricsWriter(
- &metricsWithID{
- id: id.String(),
- mw: e.metrics,
- },
- ),
- shard.WithWriteCacheMetrics(
- &writeCacheMetrics{
- shardID: id.String(),
- metrics: e.metrics.WriteCache(),
- },
- ),
- shard.WithGCMetrics(
- &gcMetrics{
- storage: e.metrics.GC(),
- shardID: id.String(),
- },
- ),
- )
- }
+ opts = append(opts,
+ shard.WithMetricsWriter(
+ &metricsWithID{
+ id: id.String(),
+ mw: e.metrics,
+ },
+ ),
+ shard.WithWriteCacheMetrics(
+ &writeCacheMetrics{
+ shardID: id.String(),
+ metrics: e.metrics.WriteCache(),
+ },
+ ),
+ shard.WithGCMetrics(
+ &gcMetrics{
+ storage: e.metrics.GC(),
+ shardID: id.String(),
+ },
+ ),
+ )
return opts
}
From 963faa615ab0a70964821b3a3725c27ed5d7f60e Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 4 Oct 2024 14:58:45 +0300
Subject: [PATCH 173/705] [#1413] engine: Cleanup shard error reporting
- `reportShardErrorBackground()` no longer differs from
`reportShardError()`, reflect this in its name;
- reuse common pieces of code to make it simpler.
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/engine/engine.go | 28 ++++-------------------
pkg/local_object_storage/engine/shards.go | 2 +-
2 files changed, 5 insertions(+), 25 deletions(-)
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index 13efdcb84..f40c9cc04 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -115,10 +115,8 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta
log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
}
-// reportShardErrorBackground increases shard error counter and logs an error.
-// It is intended to be used from background workers and
-// doesn't change shard mode because of possible deadlocks.
-func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err error) {
+// reportShardErrorByID increases shard error counter and logs an error.
+func (e *StorageEngine) reportShardErrorByID(id string, msg string, err error) {
e.mtx.RLock()
sh, ok := e.shards[id]
e.mtx.RUnlock()
@@ -127,16 +125,7 @@ func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err er
return
}
- if isLogical(err) {
- e.log.Warn(msg,
- zap.Stringer("shard_id", sh.ID()),
- zap.String("error", err.Error()))
- return
- }
-
- errCount := sh.errorCount.Add(1)
- sh.Shard.IncErrorCounter()
- e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err)
+ e.reportShardError(sh, msg, err)
}
// reportShardError checks that the amount of errors doesn't exceed the configured threshold.
@@ -156,16 +145,7 @@ func (e *StorageEngine) reportShardError(
errCount := sh.errorCount.Add(1)
sh.Shard.IncErrorCounter()
- e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err, fields...)
-}
-func (e *StorageEngine) reportShardErrorWithFlags(
- sh *shard.Shard,
- errCount uint32,
- msg string,
- err error,
- fields ...zap.Field,
-) {
sid := sh.ID()
e.log.Warn(msg, append([]zap.Field{
zap.Stringer("shard_id", sid),
@@ -179,7 +159,7 @@ func (e *StorageEngine) reportShardErrorWithFlags(
req := setModeRequest{
errorCount: errCount,
- sh: sh,
+ sh: sh.Shard,
isMeta: errors.As(err, new(metaerr.Error)),
}
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 96f54369b..c3ccb5276 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -134,7 +134,7 @@ func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*sh
shard.WithExpiredTombstonesCallback(e.processExpiredTombstones),
shard.WithExpiredLocksCallback(e.processExpiredLocks),
shard.WithDeletedLockCallback(e.processDeletedLocks),
- shard.WithReportErrorFunc(e.reportShardErrorBackground),
+ shard.WithReportErrorFunc(e.reportShardErrorByID),
shard.WithZeroSizeCallback(e.processZeroSizeContainers),
shard.WithZeroCountCallback(e.processZeroCountContainers),
)...)
From 4dc9a1b300b2f22dbd9628713917347bc0d3a6ee Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 4 Oct 2024 15:07:20 +0300
Subject: [PATCH 174/705] [#1413] engine: Remove error counting methods from
Shard
All error counting and handling logic is present on the engine level.
Currently, we pass engine metrics with shard ID metric to shard, then
export 3 methods to manipulate these metrics.
In this commit all methods are removed and error counter is tracked on
the engine level exclusively.
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/engine/engine.go | 2 +-
pkg/local_object_storage/engine/shards.go | 6 +++---
pkg/local_object_storage/shard/metrics.go | 9 ---------
pkg/local_object_storage/shard/shard.go | 12 ------------
4 files changed, 4 insertions(+), 25 deletions(-)
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index f40c9cc04..3183d6137 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -144,7 +144,7 @@ func (e *StorageEngine) reportShardError(
}
errCount := sh.errorCount.Add(1)
- sh.Shard.IncErrorCounter()
+ e.metrics.IncErrorCounter(sh.ID().String())
sid := sh.ID()
e.log.Warn(msg, append([]zap.Field{
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index c3ccb5276..aab2c423c 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -217,7 +217,7 @@ func (e *StorageEngine) removeShards(ids ...string) {
continue
}
- sh.DeleteShardMetrics()
+ e.metrics.DeleteShardMetrics(id)
ss = append(ss, sh)
delete(e.shards, id)
@@ -318,7 +318,7 @@ func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounte
if id.String() == shID {
if resetErrorCounter {
sh.errorCount.Store(0)
- sh.Shard.ClearErrorCounter()
+ e.metrics.ClearErrorCounter(shID)
}
return sh.SetMode(m)
}
@@ -422,7 +422,7 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) {
for _, sh := range ss {
idStr := sh.ID().String()
- sh.DeleteShardMetrics()
+ e.metrics.DeleteShardMetrics(idStr)
delete(e.shards, idStr)
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
index 6bf198048..91bf8d0ae 100644
--- a/pkg/local_object_storage/shard/metrics.go
+++ b/pkg/local_object_storage/shard/metrics.go
@@ -24,12 +24,6 @@ type MetricsWriter interface {
SetShardID(id string)
// SetReadonly must set shard mode.
SetMode(mode mode.Mode)
- // IncErrorCounter increment error counter.
- IncErrorCounter()
- // ClearErrorCounter clear error counter.
- ClearErrorCounter()
- // DeleteShardMetrics deletes shard metrics from registry.
- DeleteShardMetrics()
// SetContainerObjectsCount sets container object count.
SetContainerObjectsCount(cnrID string, objectType string, value uint64)
// IncContainerObjectsCount increments container object count.
@@ -57,9 +51,6 @@ func (noopMetrics) AddToPayloadSize(int64) {}
func (noopMetrics) IncObjectCounter(string) {}
func (noopMetrics) SetShardID(string) {}
func (noopMetrics) SetMode(mode.Mode) {}
-func (noopMetrics) IncErrorCounter() {}
-func (noopMetrics) ClearErrorCounter() {}
-func (noopMetrics) DeleteShardMetrics() {}
func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {}
func (noopMetrics) IncContainerObjectsCount(string, string) {}
func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index a57b548be..d7e723733 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -494,18 +494,6 @@ func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) {
}
}
-func (s *Shard) IncErrorCounter() {
- s.cfg.metricsWriter.IncErrorCounter()
-}
-
-func (s *Shard) ClearErrorCounter() {
- s.cfg.metricsWriter.ClearErrorCounter()
-}
-
-func (s *Shard) DeleteShardMetrics() {
- s.cfg.metricsWriter.DeleteShardMetrics()
-}
-
func (s *Shard) SetEvacuationInProgress(val bool) {
s.m.Lock()
defer s.m.Unlock()
From 2f710d8f945f90c5d65e4c9a0c53f0dfdcc4f291 Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Fri, 4 Oct 2024 15:23:22 +0300
Subject: [PATCH 175/705] [#1414] metabase: Check parameter for
`CountAliveObjectsInBucket`
Signed-off-by: Anton Nikiforov
---
pkg/local_object_storage/metabase/list.go | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index 44f25246e..74a529809 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -452,10 +452,11 @@ func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjec
return 0, ErrDegradedMode
}
- cidRaw := prm.BucketName[1:bucketKeySize]
- if cidRaw == nil {
+ if len(prm.BucketName) != bucketKeySize {
return 0, nil
}
+
+ cidRaw := prm.BucketName[1:bucketKeySize]
var count uint64
err := db.boltDB.View(func(tx *bbolt.Tx) error {
bkt := tx.Bucket(prm.BucketName)
From fc032838c037c7c649f80181ca71d8c9f6847e7d Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Mon, 7 Oct 2024 11:50:47 +0300
Subject: [PATCH 176/705] [#1215] blobstor/test: Cover iteration behaviour
Signed-off-by: Evgenii Stratonikov
---
.../blobstor/iterate_test.go | 172 ++++++------------
1 file changed, 59 insertions(+), 113 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go
index 079728380..195d0bd31 100644
--- a/pkg/local_object_storage/blobstor/iterate_test.go
+++ b/pkg/local_object_storage/blobstor/iterate_test.go
@@ -3,10 +3,13 @@ package blobstor
import (
"context"
"encoding/binary"
+ "errors"
"os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -90,117 +93,60 @@ func TestIterateObjects(t *testing.T) {
}
func TestIterate_IgnoreErrors(t *testing.T) {
- t.Skip()
- // dir := t.TempDir()
- //
- // const (
- // smallSize = 512
- // objCount = 5
- // )
- // bsOpts := []Option{
- // WithCompressObjects(true),
- // WithRootPath(dir),
- // WithSmallSizeLimit(smallSize * 2), // + header
- // WithBlobovniczaOpenedCacheSize(1),
- // WithBlobovniczaShallowWidth(1),
- // WithBlobovniczaShallowDepth(1)}
- // bs := New(bsOpts...)
- // require.NoError(t, bs.Open(false))
- // require.NoError(t, bs.Init())
- //
- // addrs := make([]oid.Address, objCount)
- // for i := range addrs {
- // addrs[i] = oidtest.Address()
- //
- // obj := object.New()
- // obj.SetContainerID(addrs[i].Container())
- // obj.SetID(addrs[i].Object())
- // obj.SetPayload(make([]byte, smallSize<<(i%2)))
- //
- // objData, err := obj.Marshal()
- // require.NoError(t, err)
- //
- // _, err = bs.PutRaw(addrs[i], objData, true)
- // require.NoError(t, err)
- // }
- //
- // // Construct corrupted compressed object.
- // buf := bytes.NewBuffer(nil)
- // badObject := make([]byte, smallSize/2+1)
- // enc, err := zstd.NewWriter(buf)
- // require.NoError(t, err)
- // rawData := enc.EncodeAll(badObject, nil)
- // for i := 4; /* magic size */ i < len(rawData); i += 2 {
- // rawData[i] ^= 0xFF
- // }
- // // Will be put uncompressed but fetched as compressed because of magic.
- // _, err = bs.PutRaw(oidtest.Address(), rawData, false)
- // require.NoError(t, err)
- // require.NoError(t, bs.fsTree.Put(oidtest.Address(), rawData))
- //
- // require.NoError(t, bs.Close())
- //
- // // Increase width to have blobovnicza which is definitely empty.
- // b := New(append(bsOpts, WithBlobovniczaShallowWidth(2))...)
- // require.NoError(t, b.Open(false))
- // require.NoError(t, b.Init())
- //
- // var p string
- // for i := 0; i < 2; i++ {
- // bp := filepath.Join(bs.rootPath, "1", strconv.FormatUint(uint64(i), 10))
- // if _, ok := bs.blobovniczas.opened.Get(bp); !ok {
- // p = bp
- // break
- // }
- // }
- // require.NotEqual(t, "", p, "expected to not have at least 1 blobovnicza in cache")
- // require.NoError(t, os.Chmod(p, 0))
- //
- // require.NoError(t, b.Close())
- // require.NoError(t, bs.Open(false))
- // require.NoError(t, bs.Init())
- //
- // var prm IteratePrm
- // prm.SetIterationHandler(func(e IterationElement) error {
- // return nil
- // })
- // _, err = bs.Iterate(prm)
- // require.Error(t, err)
- //
- // prm.IgnoreErrors()
- //
- // t.Run("skip invalid objects", func(t *testing.T) {
- // actual := make([]oid.Address, 0, len(addrs))
- // prm.SetIterationHandler(func(e IterationElement) error {
- // obj := object.New()
- // err := obj.Unmarshal(e.data)
- // if err != nil {
- // return err
- // }
- //
- // var addr oid.Address
- // cnr, _ := obj.ContainerID()
- // addr.SetContainer(cnr)
- // id, _ := obj.ID()
- // addr.SetObject(id)
- // actual = append(actual, addr)
- // return nil
- // })
- //
- // _, err := bs.Iterate(prm)
- // require.NoError(t, err)
- // require.ElementsMatch(t, addrs, actual)
- // })
- // t.Run("return errors from handler", func(t *testing.T) {
- // n := 0
- // expectedErr := errors.New("expected error")
- // prm.SetIterationHandler(func(e IterationElement) error {
- // if n++; n == objCount/2 {
- // return expectedErr
- // }
- // return nil
- // })
- // _, err := bs.Iterate(prm)
- // require.ErrorIs(t, err, expectedErr)
- // })
+ ctx := context.Background()
+
+ myErr := errors.New("unique error")
+ nopIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, nil }
+ panicIter := func(common.IteratePrm) (common.IterateRes, error) { panic("unreachable") }
+ errIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, myErr }
+
+ var s1iter, s2iter func(common.IteratePrm) (common.IterateRes, error)
+ st1 := teststore.New(
+ teststore.WithSubstorage(memstore.New()),
+ teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
+ return s1iter(prm)
+ }))
+ st2 := teststore.New(
+ teststore.WithSubstorage(memstore.New()),
+ teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
+ return s2iter(prm)
+ }))
+
+ bsOpts := []Option{WithStorages([]SubStorage{
+ {Storage: st1},
+ {Storage: st2},
+ })}
+ bs := New(bsOpts...)
+ require.NoError(t, bs.Open(ctx, mode.ReadWrite))
+ require.NoError(t, bs.Init())
+
+ nopHandler := func(e common.IterationElement) error {
+ return nil
+ }
+
+ t.Run("no errors", func(t *testing.T) {
+ s1iter = nopIter
+ s2iter = nopIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
+ require.NoError(t, err)
+ })
+ t.Run("error in the first sub storage, the second one is not iterated over", func(t *testing.T) {
+ s1iter = errIter
+ s2iter = panicIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
+ require.ErrorIs(t, err, myErr)
+ })
+
+ t.Run("ignore errors, storage 1", func(t *testing.T) {
+ s1iter = errIter
+ s2iter = nopIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
+ require.NoError(t, err)
+ })
+ t.Run("ignore errors, storage 2", func(t *testing.T) {
+ s1iter = nopIter
+ s2iter = errIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
+ require.NoError(t, err)
+ })
}
From a5de74a2492deb9e969c1d722c82bb8b050130df Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Mon, 7 Oct 2024 15:13:44 +0300
Subject: [PATCH 177/705] [#1418] go.mod: Update api-go
Signed-off-by: Evgenii Stratonikov
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 1023948bc..f81ba9cf7 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
diff --git a/go.sum b/go.sum
index 5d719a027..8aa087de4 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f h1:FZvX6CLzTQqMyMvOerIKMvIEJQbOImDjSooZx3AVRyE=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3 h1:6QXNnfBgYx81UZsBdpPnQY+ZMSKGFbFc29wV7DJ/UG4=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
From 5fbb2657ca9ca9bbc3aa2ca9239fbb55ea47cdc3 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 8 Oct 2024 10:02:14 +0300
Subject: [PATCH 178/705] [#1419] mod: Bump sdk-go version
Signed-off-by: Dmitrii Stepanov
---
go.mod | 4 ++--
go.sum | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/go.mod b/go.mod
index f81ba9cf7..91cc55a36 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
@@ -60,7 +60,7 @@ require (
require (
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
- github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
diff --git a/go.sum b/go.sum
index 8aa087de4..728592ea5 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98 h1:ijUci3thz0EwWkuRJDocW5D1RkVAJlt9xNG4CYepC90=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa h1:Jr8hXNNFECLhC7S45HuyQms4U/gim1xILoU3g4ZZnHg=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
@@ -27,8 +27,8 @@ git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
-github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
-github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
+github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
+github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
From 0c49bca19c82d574c9a93681bda77362edd5b88c Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 7 Oct 2024 18:32:26 +0300
Subject: [PATCH 179/705] [#1415] lens/explorer: Add timeout for opening
database
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-lens/internal/blobovnicza/tui.go | 13 +------------
cmd/frostfs-lens/internal/meta/tui.go | 13 +------------
cmd/frostfs-lens/internal/tui/util.go | 13 +++++++++++++
cmd/frostfs-lens/internal/writecache/tui.go | 13 +------------
4 files changed, 16 insertions(+), 36 deletions(-)
diff --git a/cmd/frostfs-lens/internal/blobovnicza/tui.go b/cmd/frostfs-lens/internal/blobovnicza/tui.go
index eb4a5ff59..4aa281616 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/tui.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/tui.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
- "go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@@ -43,7 +42,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := openDB(false)
+ db, err := tui.OpenDB(vPath, false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
@@ -67,13 +66,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
-
-func openDB(writable bool) (*bbolt.DB, error) {
- db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
- ReadOnly: !writable,
- })
- if err != nil {
- return nil, err
- }
- return db, nil
-}
diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go
index 00e8bf117..5a41f945c 100644
--- a/cmd/frostfs-lens/internal/meta/tui.go
+++ b/cmd/frostfs-lens/internal/meta/tui.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
- "go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@@ -44,7 +43,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := openDB(false)
+ db, err := tui.OpenDB(vPath, false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
@@ -70,13 +69,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
-
-func openDB(writable bool) (*bbolt.DB, error) {
- db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
- ReadOnly: !writable,
- })
- if err != nil {
- return nil, err
- }
- return db, nil
-}
diff --git a/cmd/frostfs-lens/internal/tui/util.go b/cmd/frostfs-lens/internal/tui/util.go
index d4e13b2a9..2d1ab3e33 100644
--- a/cmd/frostfs-lens/internal/tui/util.go
+++ b/cmd/frostfs-lens/internal/tui/util.go
@@ -3,12 +3,25 @@ package tui
import (
"errors"
"strings"
+ "time"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/mr-tron/base58"
+ "go.etcd.io/bbolt"
)
+func OpenDB(path string, writable bool) (*bbolt.DB, error) {
+ db, err := bbolt.Open(path, 0o600, &bbolt.Options{
+ ReadOnly: !writable,
+ Timeout: 100 * time.Millisecond,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
+
func CIDParser(s string) (any, error) {
data, err := base58.Decode(s)
if err != nil {
diff --git a/cmd/frostfs-lens/internal/writecache/tui.go b/cmd/frostfs-lens/internal/writecache/tui.go
index 6b7532b08..b7e4d7c96 100644
--- a/cmd/frostfs-lens/internal/writecache/tui.go
+++ b/cmd/frostfs-lens/internal/writecache/tui.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
- "go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@@ -43,7 +42,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := openDB(false)
+ db, err := tui.OpenDB(vPath, false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
@@ -67,13 +66,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
-
-func openDB(writable bool) (*bbolt.DB, error) {
- db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
- ReadOnly: !writable,
- })
- if err != nil {
- return nil, err
- }
- return db, nil
-}
From 899cd55c277b04b974e67df29b81146528d5c293 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 1 Oct 2024 13:28:46 +0300
Subject: [PATCH 180/705] [#1412] engine: PutPrm refactoring
Use fields instead of methods.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/list_test.go | 6 +----
pkg/local_object_storage/engine/put.go | 24 ++++++--------------
2 files changed, 8 insertions(+), 22 deletions(-)
diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go
index 11a6c7841..d683b5475 100644
--- a/pkg/local_object_storage/engine/list_test.go
+++ b/pkg/local_object_storage/engine/list_test.go
@@ -82,11 +82,7 @@ func TestListWithCursor(t *testing.T) {
for range tt.objectNum {
containerID := cidtest.ID()
obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'})
-
- var prm PutPrm
- prm.WithObject(obj)
-
- err := e.Put(context.Background(), prm)
+ err := e.Put(context.Background(), PutPrm{Object: obj})
require.NoError(t, err)
expected = append(expected, object.Info{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)})
}
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index bf86402a7..9ce31e791 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -22,7 +22,7 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- obj *objectSDK.Object
+ Object *objectSDK.Object
}
var errPutShard = errors.New("could not put object to any shard")
@@ -41,13 +41,6 @@ type putToShardRes struct {
err error
}
-// WithObject is a Put option to set object to save.
-//
-// Option is required.
-func (p *PutPrm) WithObject(obj *objectSDK.Object) {
- p.obj = obj
-}
-
// Put saves the object to local storage.
//
// Returns any error encountered that
@@ -59,7 +52,7 @@ func (p *PutPrm) WithObject(obj *objectSDK.Object) {
func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put",
trace.WithAttributes(
- attribute.String("address", object.AddressOf(prm.obj).EncodeToString()),
+ attribute.String("address", object.AddressOf(prm.Object).EncodeToString()),
))
defer span.End()
@@ -74,13 +67,13 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
defer elapsed("Put", e.metrics.AddMethodDuration)()
- addr := object.AddressOf(prm.obj)
+ addr := object.AddressOf(prm.Object)
// In #1146 this check was parallelized, however, it became
// much slower on fast machines for 4 shards.
var parent oid.Address
- if prm.obj.ECHeader() != nil {
- parent.SetObject(prm.obj.ECHeader().Parent())
+ if prm.Object.ECHeader() != nil {
+ parent.SetObject(prm.Object.ECHeader().Parent())
parent.SetContainer(addr.Container())
}
var shPrm shard.ExistsPrm
@@ -113,7 +106,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
// Shard was concurrently removed, skip.
return false
}
- shRes = e.putToShard(ctx, sh, pool, addr, prm.obj)
+ shRes = e.putToShard(ctx, sh, pool, addr, prm.Object)
return shRes.status != putToShardUnknown
})
switch shRes.status {
@@ -202,8 +195,5 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
// Put writes provided object to local storage.
func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error {
- var putPrm PutPrm
- putPrm.WithObject(obj)
-
- return storage.Put(ctx, putPrm)
+ return storage.Put(ctx, PutPrm{Object: obj})
}
From 1b520f79733e3628af5d47b597b5baff60f3f36a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 1 Oct 2024 15:27:06 +0300
Subject: [PATCH 181/705] [#1412] engine: Add `IsIndexedContainer` flag
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/object.go | 4 +-
pkg/core/container/info.go | 103 ++++++++++++++++++
pkg/core/container/util.go | 12 ++
.../engine/control_test.go | 2 +-
.../engine/delete_test.go | 8 +-
.../engine/engine_test.go | 2 +-
.../engine/inhume_test.go | 2 +-
pkg/local_object_storage/engine/lock_test.go | 14 +--
pkg/local_object_storage/engine/put.go | 7 +-
pkg/local_object_storage/engine/tree_test.go | 2 +-
pkg/services/object/common/writer/ec.go | 3 +-
pkg/services/object/common/writer/local.go | 9 +-
pkg/services/object/common/writer/writer.go | 3 +-
pkg/services/object/put/single.go | 11 +-
pkg/services/policer/check.go | 2 +-
pkg/services/policer/ec.go | 39 ++++---
pkg/services/replicator/pull.go | 3 +-
pkg/services/replicator/put.go | 3 +-
pkg/services/replicator/task.go | 3 +
19 files changed, 182 insertions(+), 50 deletions(-)
create mode 100644 pkg/core/container/info.go
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 47649c88b..5c322886b 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -535,6 +535,6 @@ func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address
return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock)
}
-func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
- return engine.Put(ctx, e.engine, o)
+func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexableContainer bool) error {
+ return engine.Put(ctx, e.engine, o, indexableContainer)
}
diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go
new file mode 100644
index 000000000..62cc21553
--- /dev/null
+++ b/pkg/core/container/info.go
@@ -0,0 +1,103 @@
+package container
+
+import (
+ "sync"
+
+ utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+)
+
+type Info struct {
+ Indexed bool
+ Removed bool
+}
+
+type infoValue struct {
+ info Info
+ err error
+}
+
+type InfoProvider interface {
+ Info(id cid.ID) (Info, error)
+}
+
+type infoProvider struct {
+ mtx *sync.RWMutex
+ cache map[cid.ID]infoValue
+ kl *utilSync.KeyLocker[cid.ID]
+
+ source Source
+ sourceErr error
+ sourceOnce *sync.Once
+ sourceFactory func() (Source, error)
+}
+
+func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider {
+ return &infoProvider{
+ mtx: &sync.RWMutex{},
+ cache: make(map[cid.ID]infoValue),
+ sourceOnce: &sync.Once{},
+ kl: utilSync.NewKeyLocker[cid.ID](),
+ sourceFactory: sourceFactory,
+ }
+}
+
+func (r *infoProvider) Info(id cid.ID) (Info, error) {
+ v, found := r.tryGetFromCache(id)
+ if found {
+ return v.info, v.err
+ }
+
+ return r.getFromSource(id)
+}
+
+func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ value, found := r.cache[id]
+ return value, found
+}
+
+func (r *infoProvider) getFromSource(id cid.ID) (Info, error) {
+ r.kl.Lock(id)
+ defer r.kl.Unlock(id)
+
+ if v, ok := r.tryGetFromCache(id); ok {
+ return v.info, v.err
+ }
+
+ r.sourceOnce.Do(func() {
+ r.source, r.sourceErr = r.sourceFactory()
+ })
+ if r.sourceErr != nil {
+ return Info{}, r.sourceErr
+ }
+
+ cnr, err := r.source.Get(id)
+ var civ infoValue
+ if err != nil {
+ if client.IsErrContainerNotFound(err) {
+ removed, err := WasRemoved(r.source, id)
+ if err != nil {
+ civ.err = err
+ } else {
+ civ.info.Removed = removed
+ }
+ } else {
+ civ.err = err
+ }
+ } else {
+ civ.info.Indexed = IsIndexedContainer(cnr.Value)
+ }
+ r.putToCache(id, civ)
+ return civ.info, civ.err
+}
+
+func (r *infoProvider) putToCache(id cid.ID, ct infoValue) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ r.cache[id] = ct
+}
diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go
index 98919284e..d27556807 100644
--- a/pkg/core/container/util.go
+++ b/pkg/core/container/util.go
@@ -4,6 +4,7 @@ import (
"errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
@@ -20,3 +21,14 @@ func WasRemoved(s Source, cid cid.ID) (bool, error) {
}
return false, err
}
+
+// IsIndexedContainer returns True if container attributes should be indexed.
+func IsIndexedContainer(cnr containerSDK.Container) bool {
+ var isS3Container bool
+ cnr.IterateAttributes(func(key, _ string) {
+ if key == ".s3-location-constraint" {
+ isS3Container = true
+ }
+ })
+ return !isS3Container
+}
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index 2de92ae84..83babeca3 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -171,7 +171,7 @@ func TestExecBlocks(t *testing.T) {
addr := object.AddressOf(obj)
- require.NoError(t, Put(context.Background(), e, obj))
+ require.NoError(t, Put(context.Background(), e, obj, false))
// block executions
errBlock := errors.New("block exec err")
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index 4a6758012..0904c9820 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -58,9 +58,9 @@ func TestDeleteBigObject(t *testing.T) {
defer e.Close(context.Background())
for i := range children {
- require.NoError(t, Put(context.Background(), e, children[i]))
+ require.NoError(t, Put(context.Background(), e, children[i], false))
}
- require.NoError(t, Put(context.Background(), e, link))
+ require.NoError(t, Put(context.Background(), e, link, false))
addrParent := object.AddressOf(parent)
checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
@@ -126,9 +126,9 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
defer e.Close(context.Background())
for i := range children {
- require.NoError(t, Put(context.Background(), e, children[i]))
+ require.NoError(t, Put(context.Background(), e, children[i], false))
}
- require.NoError(t, Put(context.Background(), e, link))
+ require.NoError(t, Put(context.Background(), e, link, false))
addrParent := object.AddressOf(parent)
checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index 525e17f34..88c523b76 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -54,7 +54,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
addr := oidtest.Address()
for range 100 {
obj := testutil.GenerateObjectWithCID(cidtest.ID())
- err := Put(context.Background(), e, obj)
+ err := Put(context.Background(), e, obj, false)
if err != nil {
b.Fatal(err)
}
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index 9daa113f8..f87679253 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -40,7 +40,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
e := testNewEngine(t).setShardsNum(t, 1).engine
defer e.Close(context.Background())
- err := Put(context.Background(), e, parent)
+ err := Put(context.Background(), e, parent, false)
require.NoError(t, err)
var inhumePrm InhumePrm
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index 9e6758fb4..3702f567f 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -97,7 +97,7 @@ func TestLockUserScenario(t *testing.T) {
id, _ := obj.ID()
objAddr.SetObject(id)
- err = Put(context.Background(), e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
@@ -105,7 +105,7 @@ func TestLockUserScenario(t *testing.T) {
locker.WriteMembers([]oid.ID{id})
objectSDK.WriteLock(lockerObj, locker)
- err = Put(context.Background(), e, lockerObj)
+ err = Put(context.Background(), e, lockerObj, false)
require.NoError(t, err)
err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id})
@@ -124,7 +124,7 @@ func TestLockUserScenario(t *testing.T) {
tombObj.SetID(tombForLockID)
tombObj.SetAttributes(a)
- err = Put(context.Background(), e, tombObj)
+ err = Put(context.Background(), e, tombObj, false)
require.NoError(t, err)
inhumePrm.WithTarget(tombForLockAddr, lockerAddr)
@@ -177,7 +177,7 @@ func TestLockExpiration(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(context.Background(), e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
@@ -189,7 +189,7 @@ func TestLockExpiration(t *testing.T) {
lock.SetType(objectSDK.TypeLock)
lock.SetAttributes(a)
- err = Put(context.Background(), e, lock)
+ err = Put(context.Background(), e, lock, false)
require.NoError(t, err)
id, _ := obj.ID()
@@ -254,14 +254,14 @@ func TestLockForceRemoval(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(context.Background(), e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
- err = Put(context.Background(), e, lock)
+ err = Put(context.Background(), e, lock, false)
require.NoError(t, err)
id, _ := obj.ID()
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index 9ce31e791..41e566560 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -22,7 +22,8 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- Object *objectSDK.Object
+ Object *objectSDK.Object
+ IsIndexedContainer bool
}
var errPutShard = errors.New("could not put object to any shard")
@@ -194,6 +195,6 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
}
// Put writes provided object to local storage.
-func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error {
- return storage.Put(ctx, PutPrm{Object: obj})
+func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object, indexedContainer bool) error {
+ return storage.Put(ctx, PutPrm{Object: obj, IsIndexedContainer: indexedContainer})
}
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index 6f694f082..21fcce415 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -37,7 +37,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
for i := range objCount {
obj := testutil.GenerateObjectWithCID(cid)
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
- err := Put(context.Background(), te.ng, obj)
+ err := Put(context.Background(), te.ng, obj, false)
if err != nil {
b.Fatal(err)
}
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
index fb0a8e4e5..6b6a14cc0 100644
--- a/pkg/services/object/common/writer/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -310,7 +310,8 @@ func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, n
func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
var err error
localTarget := LocalTarget{
- Storage: e.Config.LocalStore,
+ Storage: e.Config.LocalStore,
+ Container: e.Container,
}
completed := make(chan interface{})
if poolErr := e.Config.LocalPool.Submit(func() {
diff --git a/pkg/services/object/common/writer/local.go b/pkg/services/object/common/writer/local.go
index 02fd25b9e..e219b44dd 100644
--- a/pkg/services/object/common/writer/local.go
+++ b/pkg/services/object/common/writer/local.go
@@ -4,7 +4,9 @@ import (
"context"
"fmt"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -13,7 +15,7 @@ import (
type ObjectStorage interface {
// Put must save passed object
// and return any appeared error.
- Put(context.Context, *objectSDK.Object) error
+ Put(context.Context, *objectSDK.Object, bool) error
// Delete must delete passed objects
// and return any appeared error.
Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error
@@ -25,7 +27,8 @@ type ObjectStorage interface {
}
type LocalTarget struct {
- Storage ObjectStorage
+ Storage ObjectStorage
+ Container containerSDK.Container
}
func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
@@ -44,7 +47,7 @@ func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met
// objects that do not change meta storage
}
- if err := t.Storage.Put(ctx, obj); err != nil {
+ if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
}
return nil
diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go
index 3d50da988..0e4c4d9c6 100644
--- a/pkg/services/object/common/writer/writer.go
+++ b/pkg/services/object/common/writer/writer.go
@@ -150,7 +150,8 @@ func newDefaultObjectWriter(prm *Params, forECPlacement bool) transformer.Object
nodeTargetInitializer: func(node NodeDescriptor) preparedObjectTarget {
if node.Local {
return LocalTarget{
- Storage: prm.Config.LocalStore,
+ Storage: prm.Config.LocalStore,
+ Container: prm.Container,
}
}
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 9b4163268..5f9b5d110 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -177,7 +177,7 @@ func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlac
}
return iter.ForEachNode(ctx, func(ctx context.Context, nd objectwriter.NodeDescriptor) error {
- return s.saveToPlacementNode(ctx, &nd, obj, signer, meta)
+ return s.saveToPlacementNode(ctx, &nd, obj, signer, meta, placement.container)
})
}
@@ -263,10 +263,10 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
}
func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwriter.NodeDescriptor, obj *objectSDK.Object,
- signer *putSingleRequestSigner, meta object.ContentMeta,
+ signer *putSingleRequestSigner, meta object.ContentMeta, container containerSDK.Container,
) error {
if nodeDesc.Local {
- return s.saveLocal(ctx, obj, meta)
+ return s.saveLocal(ctx, obj, meta, container)
}
var info client.NodeInfo
@@ -281,9 +281,10 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
return s.redirectPutSingleRequest(ctx, signer, obj, info, c)
}
-func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta) error {
+func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error {
localTarget := &objectwriter.LocalTarget{
- Storage: s.Config.LocalStore,
+ Storage: s.Config.LocalStore,
+ Container: container,
}
return localTarget.WriteObject(ctx, obj, meta)
}
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index 06282bd8d..401977f66 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -37,7 +37,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er
policy := cnr.Value.PlacementPolicy()
if policycore.IsECPlacement(policy) {
- return p.processECContainerObject(ctx, objInfo, policy)
+ return p.processECContainerObject(ctx, objInfo, cnr.Value)
}
return p.processRepContainerObject(ctx, objInfo, policy)
}
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index e822d1c09..6d2c153c9 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -10,6 +10,7 @@ import (
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
@@ -27,11 +28,11 @@ type ecChunkProcessResult struct {
var errInvalidECPlacement = errors.New("invalid EC placement: EC placement must have one placement vector with at least one node")
-func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
+func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
if objInfo.ECInfo == nil {
- return p.processECContainerRepObject(ctx, objInfo, policy)
+ return p.processECContainerRepObject(ctx, objInfo, cnr.PlacementPolicy())
}
- return p.processECContainerECObject(ctx, objInfo, policy)
+ return p.processECContainerECObject(ctx, objInfo, cnr)
}
// processECContainerRepObject processes non erasure coded objects in EC container: tombstones, locks and linking objects.
@@ -67,8 +68,8 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec
return nil
}
-func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
- nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, policy)
+func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
+ nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy())
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -85,9 +86,9 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
res := p.processECChunk(ctx, objInfo, nn[0])
if !res.validPlacement {
// drop local chunk only if all required chunks are in place
- res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0])
+ res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0], cnr)
}
- p.adjustECPlacement(ctx, objInfo, nn[0], policy)
+ p.adjustECPlacement(ctx, objInfo, nn[0], cnr)
if res.removeLocal {
p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
@@ -138,7 +139,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
}
}
-func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) bool {
+func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) bool {
var parentAddress oid.Address
parentAddress.SetContainer(objInfo.Address.Container())
parentAddress.SetObject(objInfo.ECInfo.ParentID)
@@ -169,8 +170,9 @@ func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.I
addr.SetContainer(objInfo.Address.Container())
addr.SetObject(indexToObjectID[index])
p.replicator.HandlePullTask(ctx, replicator.Task{
- Addr: addr,
- Nodes: candidates,
+ Addr: addr,
+ Nodes: candidates,
+ Container: cnr,
})
}
// there was some missing chunks, it's not ok
@@ -245,7 +247,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
return true
}
-func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, policy netmap.PlacementPolicy) {
+func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) {
var parentAddress oid.Address
parentAddress.SetContainer(objInfo.Address.Container())
parentAddress.SetObject(objInfo.ECInfo.ParentID)
@@ -292,7 +294,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
if !restore || uint32(len(resolved)) == objInfo.ECInfo.Total {
return
}
- if objInfo.ECInfo.Total-uint32(len(resolved)) > policy.ReplicaDescriptor(0).GetECParityCount() {
+ if objInfo.ECInfo.Total-uint32(len(resolved)) > cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount() {
var found []uint32
for i := range resolved {
found = append(found, i)
@@ -300,11 +302,13 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
p.log.Error(logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
return
}
- p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, policy)
+ p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr)
}
-func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID, policy netmap.PlacementPolicy) {
- c, err := erasurecode.NewConstructor(int(policy.ReplicaDescriptor(0).GetECDataCount()), int(policy.ReplicaDescriptor(0).GetECParityCount()))
+func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID,
+ cnr containerSDK.Container,
+) {
+ c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount()))
if err != nil {
p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
@@ -339,8 +343,9 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
targetNode := nodes[idx%len(nodes)]
if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
p.replicator.HandleLocalPutTask(ctx, replicator.Task{
- Addr: addr,
- Obj: part,
+ Addr: addr,
+ Obj: part,
+ Container: cnr,
})
} else {
p.replicator.HandleReplicationTask(ctx, replicator.Task{
diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go
index d178700f6..7e7090237 100644
--- a/pkg/services/replicator/pull.go
+++ b/pkg/services/replicator/pull.go
@@ -5,6 +5,7 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
@@ -62,7 +63,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
return
}
- err := engine.Put(ctx, p.localStorage, obj)
+ err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container))
if err != nil {
p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go
index c06ec3f65..537833516 100644
--- a/pkg/services/replicator/put.go
+++ b/pkg/services/replicator/put.go
@@ -5,6 +5,7 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -37,7 +38,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
return
}
- err := engine.Put(ctx, p.localStorage, task.Obj)
+ err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container))
if err != nil {
p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
diff --git a/pkg/services/replicator/task.go b/pkg/services/replicator/task.go
index d2b5b2506..a03f8dcaa 100644
--- a/pkg/services/replicator/task.go
+++ b/pkg/services/replicator/task.go
@@ -1,6 +1,7 @@
package replicator
import (
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -16,4 +17,6 @@ type Task struct {
Obj *objectSDK.Object
// Nodes is a list of potential object holders.
Nodes []netmap.NodeInfo
+
+ Container containerSDK.Container
}
From be744ae3e6eadb5b02952cbb110ef59f33f799bc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 1 Oct 2024 18:19:12 +0300
Subject: [PATCH 182/705] [#1412] metabase: Index attributes for indexed
containers
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/object.go | 4 +-
pkg/local_object_storage/engine/evacuate.go | 11 +--
pkg/local_object_storage/engine/put.go | 5 +-
pkg/local_object_storage/metabase/delete.go | 45 ++++++++++
.../metabase/delete_meta_test.go | 85 +++++++++++++++++++
pkg/local_object_storage/metabase/put.go | 80 ++++++++++++++++-
pkg/local_object_storage/metabase/util.go | 8 +-
pkg/local_object_storage/shard/put.go | 8 +-
8 files changed, 231 insertions(+), 15 deletions(-)
create mode 100644 pkg/local_object_storage/metabase/delete_meta_test.go
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 5c322886b..f2c4bff1d 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -535,6 +535,6 @@ func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address
return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock)
}
-func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexableContainer bool) error {
- return engine.Put(ctx, e.engine, o, indexableContainer)
+func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexedContainer bool) error {
+ return engine.Put(ctx, e.engine, o, indexedContainer)
}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index a618ff274..1baf237f9 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -18,6 +18,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -471,7 +472,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
default:
}
egObject.Go(func() error {
- err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate)
+ err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate, c.Value)
if err != nil {
cancel(err)
}
@@ -744,7 +745,7 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm)
}
func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
- getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container,
) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects")
defer span.End()
@@ -773,7 +774,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI
return err
}
- evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res)
+ evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res, cnr)
if err != nil {
return err
}
@@ -817,7 +818,7 @@ func (e *StorageEngine) isNotRepOne(c *container.Container) bool {
}
func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes,
+ shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container,
) (bool, error) {
hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString()))
for j := range shards {
@@ -830,7 +831,7 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add
if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok {
continue
}
- switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object).status {
+ switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object, container.IsIndexedContainer(cnr)).status {
case putToShardSuccess:
res.objEvacuated.Add(1)
e.log.Debug(logs.EngineObjectIsMovedToAnotherShard,
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index 41e566560..a50d80b75 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -107,7 +107,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
// Shard was concurrently removed, skip.
return false
}
- shRes = e.putToShard(ctx, sh, pool, addr, prm.Object)
+ shRes = e.putToShard(ctx, sh, pool, addr, prm.Object, prm.IsIndexedContainer)
return shRes.status != putToShardUnknown
})
switch shRes.status {
@@ -125,7 +125,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
// putToShard puts object to sh.
// Return putToShardStatus and error if it is necessary to propagate an error upper.
func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool,
- addr oid.Address, obj *objectSDK.Object,
+ addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool,
) (res putToShardRes) {
exitCh := make(chan struct{})
@@ -158,6 +158,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
var putPrm shard.PutPrm
putPrm.SetObject(obj)
+ putPrm.SetIndexAttributes(isIndexedContainer)
_, err = sh.Put(ctx, putPrm)
if err != nil {
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 4ad11164f..3add1f268 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -338,6 +338,11 @@ func (db *DB) deleteObject(
return fmt.Errorf("can't remove list indexes: %w", err)
}
+ err = updateFKBTIndexes(tx, obj, delFKBTIndexItem)
+ if err != nil {
+ return fmt.Errorf("can't remove fake bucket tree indexes: %w", err)
+ }
+
if isParent {
// remove record from the garbage bucket, because regular object deletion does nothing for virtual object
garbageBKT := tx.Bucket(garbageBucketName)
@@ -415,6 +420,46 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
return nil
}
+func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
+ bkt := tx.Bucket(item.name)
+ if bkt == nil {
+ return nil
+ }
+
+ fkbtRoot := bkt.Bucket(item.key)
+ if fkbtRoot == nil {
+ return nil
+ }
+
+ if err := fkbtRoot.Delete(item.val); err != nil {
+ return err
+ }
+
+ if hasAnyItem(fkbtRoot) {
+ return nil
+ }
+
+ if err := bkt.DeleteBucket(item.key); err != nil {
+ return err
+ }
+
+ if hasAnyItem(bkt) {
+ return nil
+ }
+
+ return tx.DeleteBucket(item.name)
+}
+
+func hasAnyItem(b *bbolt.Bucket) bool {
+ var hasAnyItem bool
+ c := b.Cursor()
+ for k, _ := c.First(); k != nil; k, _ = c.Next() {
+ hasAnyItem = true
+ break
+ }
+ return hasAnyItem
+}
+
func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error {
addr := object.AddressOf(obj)
diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go
new file mode 100644
index 000000000..cdfe2a203
--- /dev/null
+++ b/pkg/local_object_storage/metabase/delete_meta_test.go
@@ -0,0 +1,85 @@
+package meta
+
+import (
+ "bytes"
+ "context"
+ "path/filepath"
+ "testing"
+
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "github.com/stretchr/testify/require"
+ "go.etcd.io/bbolt"
+)
+
+func TestPutDeleteIndexAttributes(t *testing.T) {
+ db := New([]Option{
+ WithPath(filepath.Join(t.TempDir(), "metabase")),
+ WithPermissions(0o600),
+ WithEpochState(epochState{}),
+ }...)
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init())
+ defer func() { require.NoError(t, db.Close()) }()
+
+ cnr := cidtest.ID()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj1, "S3-Access-Box-CRDT-Name", "CRDT-Name")
+ testutil.AddAttribute(obj1, objectSDK.AttributeFilePath, "/path/to/object")
+
+ var putPrm PutPrm
+ putPrm.SetObject(obj1)
+
+ _, err := db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ return nil
+ }))
+
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj2, "S3-Access-Box-CRDT-Name", "CRDT-Name")
+ testutil.AddAttribute(obj2, objectSDK.AttributeFilePath, "/path/to/object")
+
+ putPrm.SetObject(obj2)
+ putPrm.SetIndexAttributes(true)
+
+ _, err = db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ objKey := objectKey(objectCore.AddressOf(obj2).Object(), make([]byte, objectKeySize))
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.NotNil(t, b)
+ b = b.Bucket([]byte("CRDT-Name"))
+ require.NotNil(t, b)
+ require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.NotNil(t, b)
+ b = b.Bucket([]byte("/path/to/object"))
+ require.NotNil(t, b)
+ require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
+ return nil
+ }))
+
+ var dPrm DeletePrm
+ dPrm.SetAddresses(objectCore.AddressOf(obj1), objectCore.AddressOf(obj2))
+ _, err = db.Delete(context.Background(), dPrm)
+ require.NoError(t, err)
+
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ return nil
+ }))
+}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 087529895..0c14196b7 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -35,6 +35,8 @@ type PutPrm struct {
obj *objectSDK.Object
id []byte
+
+ indexAttributes bool
}
// PutRes groups the resulting values of Put operation.
@@ -52,6 +54,10 @@ func (p *PutPrm) SetStorageID(id []byte) {
p.id = id
}
+func (p *PutPrm) SetIndexAttributes(v bool) {
+ p.indexAttributes = v
+}
+
var (
ErrUnknownObjectType = errors.New("unknown object type")
ErrIncorrectSplitInfoUpdate = errors.New("updating split info on object without it")
@@ -90,7 +96,7 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
var e error
- res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch)
+ res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch, prm.indexAttributes)
return e
})
if err == nil {
@@ -108,6 +114,7 @@ func (db *DB) put(tx *bbolt.Tx,
id []byte,
si *objectSDK.SplitInfo,
currEpoch uint64,
+ indexAttributes bool,
) (PutRes, error) {
cnr, ok := obj.ContainerID()
if !ok {
@@ -129,7 +136,7 @@ func (db *DB) put(tx *bbolt.Tx,
return PutRes{}, db.updateObj(tx, obj, id, si, isParent)
}
- return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch)
+ return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch, indexAttributes)
}
func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool) error {
@@ -152,14 +159,14 @@ func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *obje
return nil
}
-func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64) error {
+func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64, indexAttributes bool) error {
if par := obj.Parent(); par != nil && !isParent { // limit depth by two
parentSI, err := splitInfoFromObject(obj)
if err != nil {
return err
}
- _, err = db.put(tx, par, id, parentSI, currEpoch)
+ _, err = db.put(tx, par, id, parentSI, currEpoch, indexAttributes)
if err != nil {
return err
}
@@ -175,6 +182,13 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
return fmt.Errorf("can't put list indexes: %w", err)
}
+ if indexAttributes {
+ err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
+ if err != nil {
+ return fmt.Errorf("can't put fake bucket tree indexes: %w", err)
+ }
+ }
+
// update container volume size estimation
if obj.Type() == objectSDK.TypeRegular && !isParent {
err = changeContainerSize(tx, cnr, obj.PayloadSize(), true)
@@ -381,6 +395,50 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
return nil
}
+var indexedAttributes = map[string]struct{}{
+ "S3-Access-Box-CRDT-Name": {},
+ objectSDK.AttributeFilePath: {},
+}
+
+// IsAtrributeIndexed returns true if the attribute is indexed by the metabase.
+func IsAtrributeIndexed(attr string) bool {
+ _, found := indexedAttributes[attr]
+ return found
+}
+
+func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error {
+ id, _ := obj.ID()
+ cnr, _ := obj.ContainerID()
+ objKey := objectKey(id, make([]byte, objectKeySize))
+
+ key := make([]byte, bucketKeySize)
+ var attrs []objectSDK.Attribute
+ if obj.ECHeader() != nil {
+ attrs = obj.ECHeader().ParentAttributes()
+ objKey = objectKey(obj.ECHeader().Parent(), make([]byte, objectKeySize))
+ } else {
+ attrs = obj.Attributes()
+ }
+
+ // user specified attributes
+ for i := range attrs {
+ if !IsAtrributeIndexed(attrs[i].Key()) {
+ continue
+ }
+ key = attributeBucketName(cnr, attrs[i].Key(), key)
+ err := f(tx, namedBucketItem{
+ name: key,
+ key: []byte(attrs[i].Value()),
+ val: objKey,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) {
attributes := obj.Attributes()
if ech := obj.ECHeader(); ech != nil {
@@ -425,6 +483,20 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
return updateUniqueIndexItem(tx, item, func(_, val []byte) ([]byte, error) { return val, nil })
}
+func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
+ bkt, err := createBucketLikelyExists(tx, item.name)
+ if err != nil {
+ return fmt.Errorf("can't create index %v: %w", item.name, err)
+ }
+
+ fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
+ if err != nil {
+ return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err)
+ }
+
+ return fkbtRoot.Put(item.val, zeroValue)
+}
+
func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index eef7210dc..4679de332 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -99,7 +99,6 @@ const (
// userAttributePrefix was used for prefixing FKBT index buckets containing objects.
// Key: attribute value
// Value: bucket containing object IDs as keys
- // removed in version 3
userAttributePrefix
// ====================
@@ -170,6 +169,13 @@ func smallBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, smallPrefix, key)
}
+// attributeBucketName returns _<attributeKey>.
+func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte {
+ key[0] = userAttributePrefix
+ cnr.Encode(key[1:])
+ return append(key[:bucketKeySize], attributeKey...)
+}
+
// rootBucketName returns _root.
func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index d7a9e7012..24cc75154 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -17,7 +17,8 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- obj *objectSDK.Object
+ obj *objectSDK.Object
+ indexAttributes bool
}
// PutRes groups the resulting values of Put operation.
@@ -28,6 +29,10 @@ func (p *PutPrm) SetObject(obj *objectSDK.Object) {
p.obj = obj
}
+func (p *PutPrm) SetIndexAttributes(v bool) {
+ p.indexAttributes = v
+}
+
// Put saves the object in shard.
//
// Returns any error encountered that
@@ -84,6 +89,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
var pPrm meta.PutPrm
pPrm.SetObject(prm.obj)
pPrm.SetStorageID(res.StorageID)
+ pPrm.SetIndexAttributes(prm.indexAttributes)
res, err := s.metaBase.Put(ctx, pPrm)
if err != nil {
// may we need to handle this case in a special way
From 1efa64ee72851e53ca5eb2bf643e74141b41ca46 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 2 Oct 2024 13:04:29 +0300
Subject: [PATCH 183/705] [#1412] metabase: Add search by indexed attributes
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/db_test.go | 12 +-
pkg/local_object_storage/metabase/select.go | 115 ++++++++++++++++--
.../metabase/select_test.go | 81 +++++++-----
3 files changed, 169 insertions(+), 39 deletions(-)
diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go
index 01e1ed2bc..0abb5ea89 100644
--- a/pkg/local_object_storage/metabase/db_test.go
+++ b/pkg/local_object_storage/metabase/db_test.go
@@ -32,7 +32,17 @@ func putBig(db *meta.DB, obj *objectSDK.Object) error {
}
func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, exp ...oid.Address) {
- res, err := metaSelect(db, cnr, fs)
+ res, err := metaSelect(db, cnr, fs, false)
+ require.NoError(t, err)
+ require.Len(t, res, len(exp))
+
+ for i := range exp {
+ require.Contains(t, res, exp[i])
+ }
+}
+
+func testSelect2(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, useAttrIndex bool, exp ...oid.Address) {
+ res, err := metaSelect(db, cnr, fs, useAttrIndex)
require.NoError(t, err)
require.Len(t, res, len(exp))
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 85d1b08ba..88ef7d5a4 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -37,8 +37,9 @@ type (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ useAttributeIndex bool
}
// SelectRes groups the resulting values of Select operation.
@@ -56,6 +57,10 @@ func (p *SelectPrm) SetFilters(fs objectSDK.SearchFilters) {
p.filters = fs
}
+func (p *SelectPrm) SetUseAttributeIndex(v bool) {
+ p.useAttributeIndex = v
+}
+
// AddressList returns list of addresses of the selected objects.
func (r SelectRes) AddressList() []oid.Address {
return r.addrList
@@ -92,14 +97,14 @@ func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err err
currEpoch := db.epochState.CurrentEpoch()
return res, metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
- res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch)
+ res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch, prm.useAttributeIndex)
success = err == nil
return err
}))
}
-func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64) ([]oid.Address, error) {
- group, err := groupFilters(fs)
+func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64, useAttributeIndex bool) ([]oid.Address, error) {
+ group, err := groupFilters(fs, useAttributeIndex)
if err != nil {
return nil, err
}
@@ -218,7 +223,13 @@ func (db *DB) selectFastFilter(
selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, fNum)
- default:
+ default: // user attribute
+ bucketName := attributeBucketName(cnr, f.Header(), bucketName)
+ if f.Operation() == objectSDK.MatchNotPresent {
+ selectOutsideFKBT(tx, allBucketNames(cnr), bucketName, to, fNum)
+ } else {
+ db.selectFromFKBT(tx, bucketName, f, to, fNum)
+ }
}
}
@@ -228,6 +239,15 @@ var mBucketNaming = map[string][]func(cid.ID, []byte) []byte{
v2object.TypeLock.String(): {bucketNameLockers},
}
+func allBucketNames(cnr cid.ID) (names [][]byte) {
+ for _, fns := range mBucketNaming {
+ for _, fn := range fns {
+ names = append(names, fn(cnr, make([]byte, bucketKeySize)))
+ }
+ }
+ return
+}
+
func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal string) (names [][]byte) {
appendNames := func(key string) {
fns, ok := mBucketNaming[key]
@@ -259,6 +279,81 @@ func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal str
return
}
+func (db *DB) selectFromFKBT(
+ tx *bbolt.Tx,
+ name []byte, // fkbt root bucket name
+ f objectSDK.SearchFilter, // filter for operation and value
+ to map[string]int, // resulting cache
+ fNum int, // index of filter
+) {
+ matchFunc, ok := db.matchers[f.Operation()]
+ if !ok {
+ db.log.Debug(logs.MetabaseMissingMatcher, zap.Stringer("operation", f.Operation()))
+
+ return
+ }
+
+ fkbtRoot := tx.Bucket(name)
+ if fkbtRoot == nil {
+ return
+ }
+
+ err := matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
+ fkbtLeaf := fkbtRoot.Bucket(k)
+ if fkbtLeaf == nil {
+ return nil
+ }
+
+ return fkbtLeaf.ForEach(func(k, _ []byte) error {
+ markAddressInCache(to, fNum, string(k))
+
+ return nil
+ })
+ })
+ if err != nil {
+ db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error()))
+ }
+}
+
+// selectOutsideFKBT collects into the resulting cache the addresses found in the incl
+// buckets that are not present in the fkbt root bucket with the given name.
+func selectOutsideFKBT(
+ tx *bbolt.Tx,
+ incl [][]byte, // buckets
+ name []byte, // fkbt root bucket name
+ to map[string]int, // resulting cache
+ fNum int, // index of filter
+) {
+ mExcl := make(map[string]struct{})
+
+ bktExcl := tx.Bucket(name)
+ if bktExcl != nil {
+ _ = bktExcl.ForEachBucket(func(k []byte) error {
+ exclBktLeaf := bktExcl.Bucket(k)
+ return exclBktLeaf.ForEach(func(k, _ []byte) error {
+ mExcl[string(k)] = struct{}{}
+
+ return nil
+ })
+ })
+ }
+
+ for i := range incl {
+ bktIncl := tx.Bucket(incl[i])
+ if bktIncl == nil {
+ continue
+ }
+
+ _ = bktIncl.ForEach(func(k, _ []byte) error {
+ if _, ok := mExcl[string(k)]; !ok {
+ markAddressInCache(to, fNum, string(k))
+ }
+
+ return nil
+ })
+ }
+}
+
// selectFromList looks into index to find list of addresses to add in
// resulting cache.
func (db *DB) selectFromList(
@@ -486,7 +581,7 @@ func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
// groupFilters divides filters in two groups: fast and slow. Fast filters
// processed by indexes and slow filters processed after by unmarshaling
// object headers.
-func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
+func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filterGroup, error) {
res := filterGroup{
fastFilters: make(objectSDK.SearchFilters, 0, len(filters)),
slowFilters: make(objectSDK.SearchFilters, 0, len(filters)),
@@ -511,7 +606,11 @@ func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
v2object.FilterPropertyPhy:
res.fastFilters = append(res.fastFilters, filters[i])
default:
- res.slowFilters = append(res.slowFilters, filters[i])
+ if useAttributeIndex && IsAtrributeIndexed(filters[i].Header()) {
+ res.fastFilters = append(res.fastFilters, filters[i])
+ } else {
+ res.slowFilters = append(res.slowFilters, filters[i])
+ }
}
}
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index bee778e2b..5cc25a9f6 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -26,6 +26,16 @@ import (
func TestDB_SelectUserAttributes(t *testing.T) {
t.Parallel()
+ t.Run("with_index", func(t *testing.T) {
+ testSelectUserAttributes(t, true)
+ })
+ t.Run("without_index", func(t *testing.T) {
+ testSelectUserAttributes(t, false)
+ })
+}
+
+func testSelectUserAttributes(t *testing.T, index bool) {
+ t.Parallel()
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
@@ -36,44 +46,52 @@ func TestDB_SelectUserAttributes(t *testing.T) {
testutil.AddAttribute(raw1, "foo", "bar")
testutil.AddAttribute(raw1, "x", "y")
- err := putBig(db, raw1)
+ var putPrm meta.PutPrm
+ putPrm.SetIndexAttributes(index)
+ putPrm.SetObject(raw1)
+ _, err := db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw2 := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(raw2, "foo", "bar")
testutil.AddAttribute(raw2, "x", "z")
- err = putBig(db, raw2)
+ putPrm.SetObject(raw2)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw3 := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(raw3, "a", "b")
- err = putBig(db, raw3)
+ putPrm.SetObject(raw3)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw4 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw4, "path", "test/1/2")
+ testutil.AddAttribute(raw4, objectSDK.AttributeFilePath, "/test/1/2")
- err = putBig(db, raw4)
+ putPrm.SetObject(raw4)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw5 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw5, "path", "test/1/3")
+ testutil.AddAttribute(raw5, objectSDK.AttributeFilePath, "/test/1/3")
- err = putBig(db, raw5)
+ putPrm.SetObject(raw5)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw6 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw6, "path", "test/2/3")
+ testutil.AddAttribute(raw6, objectSDK.AttributeFilePath, "/test/2/3")
- err = putBig(db, raw6)
+ putPrm.SetObject(raw6)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw7 := testutil.GenerateObjectWithCID(cnr)
var attr objectSDK.Attribute
- attr.SetKey("path")
- attr.SetValue("test/3/4")
+ attr.SetKey(objectSDK.AttributeFilePath)
+ attr.SetValue("/test/3/4")
attrs := raw7.Attributes()
attrs = append(attrs, attr)
ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{
@@ -81,37 +99,39 @@ func TestDB_SelectUserAttributes(t *testing.T) {
Attributes: attrs,
}, 0, 3, []byte{}, 0)
raw7.SetECHeader(ech)
- require.NoError(t, putBig(db, raw7))
+ putPrm.SetObject(raw7)
+ _, err = db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
var raw7Parent oid.Address
raw7Parent.SetContainer(cnr)
raw7Parent.SetObject(ech.Parent())
fs := objectSDK.SearchFilters{}
fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
)
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw1))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1))
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringNotEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw2))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw2))
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "b", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw3))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw3))
fs = objectSDK.SearchFilters{}
fs.AddFilter("c", "d", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs)
+ testSelect2(t, db, cnr, fs, index)
fs = objectSDK.SearchFilters{}
fs.AddFilter("foo", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw3),
object.AddressOf(raw4),
object.AddressOf(raw5),
@@ -121,7 +141,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw4),
@@ -131,7 +151,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
)
fs = objectSDK.SearchFilters{}
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
@@ -143,7 +163,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
fs = objectSDK.SearchFilters{}
fs.AddFilter("key", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
@@ -154,8 +174,8 @@ func TestDB_SelectUserAttributes(t *testing.T) {
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter("path", "test", objectSDK.MatchCommonPrefix)
- testSelect(t, db, cnr, fs,
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test", objectSDK.MatchCommonPrefix)
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
@@ -163,15 +183,15 @@ func TestDB_SelectUserAttributes(t *testing.T) {
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter("path", "test/1", objectSDK.MatchCommonPrefix)
- testSelect(t, db, cnr, fs,
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test/1", objectSDK.MatchCommonPrefix)
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw4),
object.AddressOf(raw5),
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter("path", "test/3/4", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs,
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test/3/4", objectSDK.MatchStringEqual)
+ testSelect2(t, db, cnr, fs, index,
raw7Parent,
)
}
@@ -1185,11 +1205,11 @@ func TestExpiredObjects(t *testing.T) {
cidExp, _ := exp.ContainerID()
cidNonExp, _ := nonExp.ContainerID()
- objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{})
+ objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{}, false)
require.NoError(t, err)
require.Empty(t, objs) // expired object should not be returned
- objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{})
+ objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{}, false)
require.NoError(t, err)
require.NotEmpty(t, objs)
})
@@ -1211,10 +1231,11 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
}
}
-func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) {
+func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters, useAttributeIndex bool) ([]oid.Address, error) {
var prm meta.SelectPrm
prm.SetFilters(fs)
prm.SetContainerID(cnr)
+ prm.SetUseAttributeIndex(useAttributeIndex)
res, err := db.Select(context.Background(), prm)
return res.AddressList(), err
From 4572fa487493f21cc5ebffcdc526270452a36e6a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 2 Oct 2024 14:52:54 +0300
Subject: [PATCH 184/705] [#1412] searchSvc: Check container is indexed
For non S3 containers it is expected to use attributes index for some
attributes.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/object.go | 5 +++--
pkg/local_object_storage/engine/delete.go | 2 +-
pkg/local_object_storage/engine/inhume_test.go | 4 ++--
pkg/local_object_storage/engine/select.go | 14 ++++++++------
pkg/local_object_storage/engine/tree_test.go | 2 +-
pkg/local_object_storage/shard/select.go | 9 ++++++---
pkg/services/object/search/container.go | 10 ++++++++++
pkg/services/object/search/service.go | 4 ++++
pkg/services/object/search/util.go | 7 ++++++-
9 files changed, 41 insertions(+), 16 deletions(-)
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index f2c4bff1d..68acb05d3 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -174,7 +174,7 @@ func initObjectService(c *cfg) {
sPutV2 := createPutSvcV2(sPut, keyStorage)
- sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache)
+ sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource)
sSearchV2 := createSearchSvcV2(sSearch, keyStorage)
@@ -366,7 +366,7 @@ func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service) *patchsvc.Servic
return patchsvc.NewService(sPut.Config, sGet)
}
-func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service {
+func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache, containerSource containercore.Source) *searchsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
return searchsvc.New(
@@ -377,6 +377,7 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav
),
c.netMapSource,
keyStorage,
+ containerSource,
searchsvc.WithLogger(c.log),
)
}
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 61cb6832d..3ec3f8f9b 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -143,7 +143,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
var selectPrm shard.SelectPrm
selectPrm.SetFilters(fs)
- selectPrm.SetContainerID(addr.Container())
+ selectPrm.SetContainerID(addr.Container(), false) // doesn't matter for search by splitID
var inhumePrm shard.InhumePrm
if force {
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index f87679253..b4fbbd810 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -49,7 +49,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
_, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(context.Background(), e, cnr, fs)
+ addrs, err := Select(context.Background(), e, cnr, false, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
@@ -78,7 +78,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
_, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(context.Background(), e, cnr, fs)
+ addrs, err := Select(context.Background(), e, cnr, false, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go
index 972a4f52a..a85891f0c 100644
--- a/pkg/local_object_storage/engine/select.go
+++ b/pkg/local_object_storage/engine/select.go
@@ -14,8 +14,9 @@ import (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ indexedContainer bool
}
// SelectRes groups the resulting values of Select operation.
@@ -24,8 +25,9 @@ type SelectRes struct {
}
// WithContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) WithContainerID(cnr cid.ID) {
+func (p *SelectPrm) WithContainerID(cnr cid.ID, indexedContainer bool) {
p.cnr = cnr
+ p.indexedContainer = indexedContainer
}
// WithFilters is a Select option to set the object filters.
@@ -67,7 +69,7 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes,
var outError error
var shPrm shard.SelectPrm
- shPrm.SetContainerID(prm.cnr)
+ shPrm.SetContainerID(prm.cnr, prm.indexedContainer)
shPrm.SetFilters(prm.filters)
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
@@ -140,9 +142,9 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro
}
// Select selects objects from local storage using provided filters.
-func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) {
+func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, isIndexedContainer bool, fs objectSDK.SearchFilters) ([]oid.Address, error) {
var selectPrm SelectPrm
- selectPrm.WithContainerID(cnr)
+ selectPrm.WithContainerID(cnr, isIndexedContainer)
selectPrm.WithFilters(fs)
res, err := storage.Select(ctx, selectPrm)
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index 21fcce415..ea0a9e74e 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -50,7 +50,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
b.Run("search", func(b *testing.B) {
var prm SelectPrm
- prm.WithContainerID(cid)
+ prm.WithContainerID(cid, true)
var fs objectSDK.SearchFilters
fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual)
diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go
index 1615f5fbe..184ca9b71 100644
--- a/pkg/local_object_storage/shard/select.go
+++ b/pkg/local_object_storage/shard/select.go
@@ -15,8 +15,9 @@ import (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ isIndexedContainer bool
}
// SelectRes groups the resulting values of Select operation.
@@ -25,8 +26,9 @@ type SelectRes struct {
}
// SetContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) SetContainerID(cnr cid.ID) {
+func (p *SelectPrm) SetContainerID(cnr cid.ID, isIndexedContainer bool) {
p.cnr = cnr
+ p.isIndexedContainer = isIndexedContainer
}
// SetFilters is a Select option to set the object filters.
@@ -61,6 +63,7 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
var selectPrm meta.SelectPrm
selectPrm.SetFilters(prm.filters)
selectPrm.SetContainerID(prm.cnr)
+ selectPrm.SetUseAttributeIndex(prm.isIndexedContainer)
mRes, err := s.metaBase.Select(ctx, selectPrm)
if err != nil {
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index d70574156..39259b0ca 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"go.uber.org/zap"
)
@@ -112,3 +113,12 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
return nil
}
+
+func (exec *execCtx) getContainer() (containerSDK.Container, error) {
+ cnrID := exec.containerID()
+ cnr, err := exec.svc.containerSource.Get(cnrID)
+ if err != nil {
+ return containerSDK.Container{}, err
+ }
+ return cnr.Value, nil
+}
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index cc388c1b2..7700f78d8 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -54,6 +54,8 @@ type cfg struct {
}
keyStore *util.KeyStorage
+
+ containerSource container.Source
}
// New creates, initializes and returns utility serving
@@ -63,6 +65,7 @@ func New(e *engine.StorageEngine,
tg *util.TraverserGenerator,
ns netmap.Source,
ks *util.KeyStorage,
+ cs container.Source,
opts ...Option,
) *Service {
c := &cfg{
@@ -76,6 +79,7 @@ func New(e *engine.StorageEngine,
traverserGenerator: tg,
currentEpochReceiver: ns,
keyStore: ks,
+ containerSource: cs,
}
for i := range opts {
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index 67b6c0d01..910384a0b 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -5,6 +5,7 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -112,9 +113,13 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c
}
func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
+ cnr, err := exec.getContainer()
+ if err != nil {
+ return nil, err
+ }
var selectPrm engine.SelectPrm
selectPrm.WithFilters(exec.searchFilters())
- selectPrm.WithContainerID(exec.containerID())
+ selectPrm.WithContainerID(exec.containerID(), container.IsIndexedContainer(cnr))
r, err := e.storage.Select(ctx, selectPrm)
if err != nil {
From 3da168f8cf96cb342c4cfe753e38c8a0bd7ad0a4 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 2 Oct 2024 16:46:39 +0300
Subject: [PATCH 185/705] [#1412] shard: Resolve whether container is indexed
on metabase resync
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 23 +++++++++++++++++++++--
internal/logs/logs.go | 1 +
pkg/local_object_storage/shard/control.go | 17 +++++++++++++++++
pkg/local_object_storage/shard/shard.go | 10 ++++++++++
4 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 4ad9ec6c6..3c7e310b4 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1058,7 +1058,9 @@ func initLocalStorage(ctx context.Context, c *cfg) {
var shardsAttached int
for _, optsWithMeta := range c.shardOpts(ctx) {
- id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts, shard.WithTombstoneSource(c.createTombstoneSource()))...)
+ id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts,
+ shard.WithTombstoneSource(c.createTombstoneSource()),
+ shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
if err != nil {
c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
} else {
@@ -1313,7 +1315,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
var rcfg engine.ReConfiguration
for _, optsWithID := range c.shardOpts(ctx) {
- rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource())))
+ rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts,
+ shard.WithTombstoneSource(c.createTombstoneSource()),
+ shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)),
+ ))
}
err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
@@ -1414,6 +1419,20 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
return tombstoneSource
}
+func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
+ return container.NewInfoProvider(func() (container.Source, error) {
+ // threadsafe: called on init or on sighup when morph initialized
+ if c.cfgMorph.client == nil {
+ initMorphComponents(ctx, c)
+ }
+ cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary())
+ if err != nil {
+ return nil, err
+ }
+ return containerClient.AsContainerSource(cc), nil
+ })
+}
+
func (c *cfg) shutdown() {
old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
if old == control.HealthStatus_SHUTTING_DOWN {
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 7aef6873e..1ae4f0d3f 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -249,6 +249,7 @@ const (
ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode"
ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode"
ShardCouldNotUnmarshalObject = "could not unmarshal object"
+ ShardSkipObjectFromResyncContainerDeleted = "object skipped from metabase resync: container deleted"
ShardCouldNotCloseShardComponent = "could not close shard component"
ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode"
ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode"
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index de881654a..4f9f25608 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
+ "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -275,6 +276,21 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
return nil
}
+ hasIndexedAttribute := slices.IndexFunc(obj.Attributes(), func(attr objectSDK.Attribute) bool { return meta.IsAtrributeIndexed(attr.Key()) }) > 0
+
+ var isIndexedContainer bool
+ if hasIndexedAttribute {
+ info, err := s.containerInfo.Info(addr.Container())
+ if err != nil {
+ return err
+ }
+ if info.Removed {
+ s.log.Debug(logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
+ return nil
+ }
+ isIndexedContainer = info.Indexed
+ }
+
var err error
switch obj.Type() {
case objectSDK.TypeTombstone:
@@ -290,6 +306,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
var mPrm meta.PutPrm
mPrm.SetObject(obj)
mPrm.SetStorageID(descriptor)
+ mPrm.SetIndexAttributes(hasIndexedAttribute && isIndexedContainer)
_, err = s.metaBase.Put(ctx, mPrm)
if err != nil && !client.IsErrObjectAlreadyRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) {
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index d7e723733..413bfd2f7 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -7,6 +7,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -95,6 +96,8 @@ type cfg struct {
metricsWriter MetricsWriter
reportErrorFunc func(selfID string, message string, err error)
+
+ containerInfo container.InfoProvider
}
func defaultCfg() *cfg {
@@ -358,6 +361,13 @@ func WithZeroCountCallback(cb EmptyContainersCallback) Option {
}
}
+// WithContainerInfoProvider returns option to set container info provider.
+func WithContainerInfoProvider(containerInfo container.InfoProvider) Option {
+ return func(c *cfg) {
+ c.containerInfo = containerInfo
+ }
+}
+
func (s *Shard) fillInfo() {
s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
From 8093e145b316762977aff5b2c8babda64ae7283b Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 3 Oct 2024 11:06:31 +0300
Subject: [PATCH 186/705] [#1412] adm: Resolve container type during metabase
upgrade
Signed-off-by: Dmitrii Stepanov
---
.../internal/modules/metabase/upgrade.go | 83 +++++++++++++----
pkg/local_object_storage/metabase/upgrade.go | 93 +++++++++++++++++--
.../metabase/upgrade_test.go | 21 +++--
pkg/local_object_storage/metabase/util.go | 15 +++
4 files changed, 179 insertions(+), 33 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
index 96cb62f10..00b30c9b2 100644
--- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
+++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
@@ -1,6 +1,7 @@
package metabase
import (
+ "context"
"errors"
"fmt"
"sync"
@@ -10,19 +11,24 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
+ morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+ nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ morphcontainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
const (
- pathFlag = "path"
noCompactFlag = "no-compact"
)
-var errNoPathsFound = errors.New("no metabase paths found")
-
-var path string
+var (
+ errNoPathsFound = errors.New("no metabase paths found")
+ errNoMorphEndpointsFound = errors.New("no morph endpoints found")
+)
var UpgradeCmd = &cobra.Command{
Use: "upgrade",
@@ -39,17 +45,10 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
- noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
- var paths []string
- if path != "" {
- paths = append(paths, path)
- }
appCfg := config.New(configFile, configDir, config.EnvPrefix)
- if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
- paths = append(paths, sc.Metabase().Path())
- return nil
- }); err != nil {
- return fmt.Errorf("failed to get metabase paths: %w", err)
+ paths, err := getMetabasePaths(appCfg)
+ if err != nil {
+ return err
}
if len(paths) == 0 {
return errNoPathsFound
@@ -58,6 +57,16 @@ func upgrade(cmd *cobra.Command, _ []string) error {
for i, path := range paths {
cmd.Println(i+1, ":", path)
}
+ mc, err := createMorphClient(cmd.Context(), appCfg)
+ if err != nil {
+ return err
+ }
+ defer mc.Close()
+ civ, err := createContainerInfoProvider(mc)
+ if err != nil {
+ return err
+ }
+ noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
result := make(map[string]bool)
var resultGuard sync.Mutex
eg, ctx := errgroup.WithContext(cmd.Context())
@@ -65,7 +74,7 @@ func upgrade(cmd *cobra.Command, _ []string) error {
eg.Go(func() error {
var success bool
cmd.Println("upgrading metabase", path, "...")
- if err := meta.Upgrade(ctx, path, !noCompact, func(a ...any) {
+ if err := meta.Upgrade(ctx, path, !noCompact, civ, func(a ...any) {
cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
}); err != nil {
cmd.Println("error: failed to upgrade metabase", path, ":", err)
@@ -92,8 +101,50 @@ func upgrade(cmd *cobra.Command, _ []string) error {
return nil
}
+func getMetabasePaths(appCfg *config.Config) ([]string, error) {
+ var paths []string
+ if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
+ paths = append(paths, sc.Metabase().Path())
+ return nil
+ }); err != nil {
+ return nil, fmt.Errorf("get metabase paths: %w", err)
+ }
+ return paths, nil
+}
+
+func createMorphClient(ctx context.Context, appCfg *config.Config) (*client.Client, error) {
+ addresses := morphconfig.RPCEndpoint(appCfg)
+ if len(addresses) == 0 {
+ return nil, errNoMorphEndpointsFound
+ }
+ key := nodeconfig.Key(appCfg)
+ cli, err := client.New(ctx,
+ key,
+ client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
+ client.WithEndpoints(addresses...),
+ client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("create morph client:%w", err)
+ }
+ return cli, nil
+}
+
+func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, error) {
+ sh, err := cli.NNSContractAddress(client.NNSContainerContractName)
+ if err != nil {
+ return nil, fmt.Errorf("resolve container contract hash: %w", err)
+ }
+ cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary())
+ if err != nil {
+ return nil, fmt.Errorf("create morph container client: %w", err)
+ }
+ return container.NewInfoProvider(func() (container.Source, error) {
+ return morphcontainer.AsContainerSource(cc), nil
+ }), nil
+}
+
func initUpgradeCommand() {
flags := UpgradeCmd.Flags()
- flags.StringVar(&path, pathFlag, "", "Path to metabase file")
flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file")
}
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index b5de430dc..f2a0107a1 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/binary"
+ "encoding/hex"
"errors"
"fmt"
"os"
@@ -12,6 +13,7 @@ import (
"time"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
@@ -25,15 +27,15 @@ const (
upgradeTimeout = 1 * time.Second
)
-var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, log func(a ...any)) error{
+var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error{
2: upgradeFromV2ToV3,
- 3: func(_ context.Context, _ *bbolt.DB, log func(a ...any)) error {
+ 3: func(_ context.Context, _ *bbolt.DB, _ container.InfoProvider, log func(a ...any)) error {
log("metabase already upgraded")
return nil
},
}
-func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any)) error {
+func Upgrade(ctx context.Context, path string, compact bool, cs container.InfoProvider, log func(a ...any)) error {
if _, err := os.Stat(path); err != nil {
return fmt.Errorf("check metabase existence: %w", err)
}
@@ -61,7 +63,7 @@ func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any))
}); err != nil {
return fmt.Errorf("set upgrade key %w", err)
}
- if err := updater(ctx, db, log); err != nil {
+ if err := updater(ctx, db, cs, log); err != nil {
return fmt.Errorf("update metabase schema: %w", err)
}
if err := db.Update(func(tx *bbolt.Tx) error {
@@ -113,11 +115,11 @@ func compactDB(db *bbolt.DB) error {
return nil
}
-func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
if err := createExpirationEpochBuckets(ctx, db, log); err != nil {
return err
}
- if err := dropUserAttributes(ctx, db, log); err != nil {
+ if err := dropUserAttributes(ctx, db, cs, log); err != nil {
return err
}
if err := dropOwnerIDIndex(ctx, db, log); err != nil {
@@ -323,10 +325,81 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i
return nil
}
-func dropUserAttributes(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
- return dropBucketsByPrefix(ctx, db, []byte{userAttributePrefix}, func(a ...any) {
- log(append([]any{"user attributes:"}, a...)...)
- })
+func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
+ log("deleting user attribute buckets...")
+ const batch = 1000
+ prefix := []byte{userAttributePrefix}
+ last := prefix
+ var count uint64
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ var keys [][]byte
+ if err := db.View(func(tx *bbolt.Tx) error {
+ c := tx.Cursor()
+ for k, _ := c.Seek(last); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() {
+ if bytes.Equal(last, k) {
+ continue
+ }
+ keys = append(keys, bytes.Clone(k))
+ }
+ return nil
+ }); err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
+ return err
+ }
+ if len(keys) == 0 {
+ log("deleting user attribute buckets completed successfully, deleted", count, "buckets")
+ return nil
+ }
+ last = keys[len(keys)-1]
+ keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs)
+ if err != nil {
+ return err
+ }
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ for _, k := range keysToDrop {
+ if err := tx.DeleteBucket(k); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ log("deleting buckets completed with an error:", err)
+ return err
+ }
+ count += uint64(len(keysToDrop))
+ log("deleted", count, "buckets")
+ }
+}
+
+func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
+ var keysToDrop [][]byte
+ for _, key := range keys {
+ attr, ok := attributeFromAttributeBucket(key)
+ if !ok {
+ return nil, fmt.Errorf("failed to parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
+ }
+ if !IsAtrributeIndexed(attr) {
+ keysToDrop = append(keysToDrop, key)
+ continue
+ }
+ contID, ok := cidFromAttributeBucket(key)
+ if !ok {
+ return nil, fmt.Errorf("failed to parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
+ }
+ info, err := cs.Info(contID)
+ if err != nil {
+ return nil, err
+ }
+ if info.Removed || !info.Indexed {
+ keysToDrop = append(keysToDrop, key)
+ }
+ }
+ return keysToDrop, nil
}
func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
index 3797de0a4..9c525291a 100644
--- a/pkg/local_object_storage/metabase/upgrade_test.go
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -12,6 +12,7 @@ import (
"time"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -35,13 +36,19 @@ func TestUpgradeV2ToV3(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.ErrorIs(t, db.Init(), ErrOutdatedVersion)
require.NoError(t, db.Close())
- require.NoError(t, Upgrade(context.Background(), path, true, t.Log))
+ require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.Init())
require.NoError(t, db.Close())
fmt.Println()
}
+type testContainerInfoProvider struct{}
+
+func (p *testContainerInfoProvider) Info(id cid.ID) (container.Info, error) {
+ return container.Info{}, nil
+}
+
func createTempCopy(t *testing.T, path string) string {
src, err := os.Open(path)
require.NoError(t, err)
@@ -95,7 +102,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
@@ -118,8 +125,8 @@ func TestGenerateMetabaseFile(t *testing.T) {
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
- testutil.AddAttribute(child, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
- testutil.AddAttribute(parent, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(child, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(parent, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
testutil.AddAttribute(child, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
testutil.AddAttribute(parent, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
_, err := db.Put(ctx, PutPrm{
@@ -138,7 +145,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
@@ -160,7 +167,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
@@ -190,7 +197,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index 4679de332..0a2f91a47 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -176,6 +176,21 @@ func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte {
return append(key[:bucketKeySize], attributeKey...)
}
+func cidFromAttributeBucket(bucketName []byte) (cid.ID, bool) {
+ if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
+ return cid.ID{}, false
+ }
+ var result cid.ID
+ return result, result.Decode(bucketName[1:bucketKeySize]) == nil
+}
+
+func attributeFromAttributeBucket(bucketName []byte) (string, bool) {
+ if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
+ return "", false
+ }
+ return string(bucketName[bucketKeySize:]), true
+}
+
// rootBucketName returns _root.
func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
From 87f4b934d1cca9a671dc93fcc4cdb5861be35915 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 3 Oct 2024 17:57:21 +0300
Subject: [PATCH 187/705] [#1412] metabase: Run bucket drop steps on upgrade
concurrently
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/upgrade.go | 22 ++++++++++++--------
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index f2a0107a1..a997b90a0 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -119,13 +119,17 @@ func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvi
if err := createExpirationEpochBuckets(ctx, db, log); err != nil {
return err
}
- if err := dropUserAttributes(ctx, db, cs, log); err != nil {
- return err
- }
- if err := dropOwnerIDIndex(ctx, db, log); err != nil {
- return err
- }
- if err := dropPayloadChecksumIndex(ctx, db, log); err != nil {
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.Go(func() error {
+ return dropUserAttributes(ctx, db, cs, log)
+ })
+ eg.Go(func() error {
+ return dropOwnerIDIndex(ctx, db, log)
+ })
+ eg.Go(func() error {
+ return dropPayloadChecksumIndex(ctx, db, log)
+ })
+ if err := eg.Wait(); err != nil {
return err
}
return db.Update(func(tx *bbolt.Tx) error {
@@ -360,7 +364,7 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv
if err != nil {
return err
}
- if err := db.Update(func(tx *bbolt.Tx) error {
+ if err := db.Batch(func(tx *bbolt.Tx) error {
for _, k := range keysToDrop {
if err := tx.DeleteBucket(k); err != nil {
return err
@@ -439,7 +443,7 @@ func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log f
log("deleting buckets completed successfully, deleted", count, "buckets")
return nil
}
- if err := db.Update(func(tx *bbolt.Tx) error {
+ if err := db.Batch(func(tx *bbolt.Tx) error {
for _, k := range keys {
if err := tx.DeleteBucket(k); err != nil {
return err
From fe9f664b577f1b51797e375ef736977ed61d9757 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 4 Oct 2024 10:49:39 +0300
Subject: [PATCH 188/705] [#1412] metabase: Drop empty user attribute buckets
on upgrade
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/upgrade.go | 166 +++++++++++++++++--
1 file changed, 154 insertions(+), 12 deletions(-)
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index a997b90a0..1f2c7956b 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -360,26 +360,40 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv
return nil
}
last = keys[len(keys)-1]
- keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs)
+ cnt, err := dropNonIndexedUserAttributeBuckets(db, cs, keys)
if err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
return err
}
- if err := db.Batch(func(tx *bbolt.Tx) error {
- for _, k := range keysToDrop {
- if err := tx.DeleteBucket(k); err != nil {
- return err
- }
- }
- return nil
- }); err != nil {
- log("deleting buckets completed with an error:", err)
+ count += cnt
+ cnt, err = dropEmptyUserAttributeBuckets(ctx, db, keys)
+ if err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
return err
}
- count += uint64(len(keysToDrop))
- log("deleted", count, "buckets")
+ count += cnt
+ log("deleted", count, "user attribute buckets")
}
}
+func dropNonIndexedUserAttributeBuckets(db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) {
+ keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs)
+ if err != nil {
+ return 0, fmt.Errorf("select non indexed user attributes: %w", err)
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ for _, k := range keysToDrop {
+ if err := tx.DeleteBucket(k); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return 0, fmt.Errorf("drop non indexed user attributes: %w", err)
+ }
+ return uint64(len(keysToDrop)), nil
+}
+
func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
var keysToDrop [][]byte
for _, key := range keys {
@@ -406,6 +420,134 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([]
return keysToDrop, nil
}
+func dropEmptyUserAttributeBuckets(ctx context.Context, db *bbolt.DB, keys [][]byte) (uint64, error) {
+ var dropBuckets [][]byte
+ for _, key := range keys {
+ select {
+ case <-ctx.Done():
+ return 0, ctx.Err()
+ default:
+ }
+
+ if err := dropEmptyNestedBuckets(ctx, db, key); err != nil {
+ return 0, err
+ }
+
+ empty, exists, err := bucketIsEmpty(db, key)
+ if err != nil {
+ return 0, err
+ }
+ if empty && exists {
+ dropBuckets = append(dropBuckets, key)
+ }
+ }
+ if len(dropBuckets) == 0 {
+ return 0, nil
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ for _, key := range dropBuckets {
+ if err := tx.DeleteBucket(key); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return 0, fmt.Errorf("drop empty user attributes buckets: %w", err)
+ }
+ return uint64(len(dropBuckets)), nil
+}
+
+func bucketIsEmpty(db *bbolt.DB, bucketKey []byte) (bool, bool, error) {
+ var empty bool
+ var exists bool
+ if err := db.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(bucketKey)
+ if b == nil {
+ return nil
+ }
+ exists = true
+ empty = !hasAnyItem(b)
+ return nil
+ }); err != nil {
+ return false, false, fmt.Errorf("bucket empty check: %w", err)
+ }
+ return empty, exists, nil
+}
+
+func dropEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey []byte) error {
+ var last []byte
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var dropBuckets [][]byte
+ var err error
+ dropBuckets, last, err = selectEmptyNestedBuckets(ctx, db, rootBucketKey, last)
+ if err != nil {
+ return fmt.Errorf("select empty nested buckets: %w", err)
+ }
+ if len(dropBuckets) == 0 {
+ return nil
+ }
+
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ rootBucket := tx.Bucket(rootBucketKey)
+ if rootBucket == nil {
+ return nil
+ }
+ for _, sb := range dropBuckets {
+ if err := rootBucket.DeleteBucket(sb); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("drop empty nested buckets: %w", err)
+ }
+ }
+}
+
+func selectEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey, last []byte) ([][]byte, []byte, error) {
+ const batchSize = 1000
+ var result [][]byte
+ if err := db.View(func(tx *bbolt.Tx) error {
+ rootBucket := tx.Bucket(rootBucketKey)
+ if rootBucket == nil {
+ return nil
+ }
+ c := rootBucket.Cursor()
+ for k, v := c.Seek(last); k != nil && len(result) < batchSize; k, v = c.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if bytes.Equal(last, k) {
+ continue
+ }
+ last = bytes.Clone(k)
+ if v != nil { // record
+ continue
+ }
+ nestedBucket := rootBucket.Bucket(k)
+ if nestedBucket == nil {
+ continue
+ }
+ if !hasAnyItem(nestedBucket) {
+ result = append(result, bytes.Clone(k))
+ }
+ }
+ return nil
+ }); err != nil {
+ return nil, nil, err
+ }
+ return result, last, nil
+}
+
func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
return dropBucketsByPrefix(ctx, db, []byte{ownerPrefix}, func(a ...any) {
log(append([]any{"owner ID index:"}, a...)...)
From c065d55ca31c18fa48c8d8a173237095179be732 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 7 Oct 2024 17:19:04 +0300
Subject: [PATCH 189/705] [#1412] metabase: Drop logging inside transaction
This could cause the DB to hang.
Signed-off-by: Dmitrii Stepanov
---
internal/logs/logs.go | 6 ----
pkg/local_object_storage/metabase/select.go | 31 ++-------------------
2 files changed, 2 insertions(+), 35 deletions(-)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 1ae4f0d3f..84bd023f1 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -226,12 +226,6 @@ const (
EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully"
EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error"
EngineObjectIsMovedToAnotherShard = "object is moved to another shard"
- MetabaseMissingMatcher = "missing matcher"
- MetabaseErrorInFKBTSelection = "error in FKBT selection"
- MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf"
- MetabaseUnknownOperation = "unknown operation"
- MetabaseCantIterateOverTheBucket = "can't iterate over the bucket"
- MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets"
MetabaseCreatedDirectoryForMetabase = "created directory for Metabase"
MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase"
MetabaseCheckingMetabaseVersion = "checking metabase version"
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 88ef7d5a4..41f05b756 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -9,7 +9,6 @@ import (
"time"
v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -18,7 +17,6 @@ import (
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
)
type (
@@ -288,8 +286,6 @@ func (db *DB) selectFromFKBT(
) { //
matchFunc, ok := db.matchers[f.Operation()]
if !ok {
- db.log.Debug(logs.MetabaseMissingMatcher, zap.Stringer("operation", f.Operation()))
-
return
}
@@ -298,7 +294,7 @@ func (db *DB) selectFromFKBT(
return
}
- err := matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
+ _ = matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
fkbtLeaf := fkbtRoot.Bucket(k)
if fkbtLeaf == nil {
return nil
@@ -310,9 +306,6 @@ func (db *DB) selectFromFKBT(
return nil
})
})
- if err != nil {
- db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error()))
- }
}
// selectOutsideFKBT looks into all incl buckets to find list of addresses outside to add in
@@ -377,24 +370,17 @@ func (db *DB) selectFromList(
case objectSDK.MatchStringEqual:
lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value())))
if err != nil {
- db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error()))
return
}
default:
fMatch, ok := db.matchers[op]
if !ok {
- db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op)))
-
return
}
if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(_, val []byte) error {
l, err := decodeList(val)
if err != nil {
- db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf,
- zap.String("error", err.Error()),
- )
-
return err
}
@@ -402,10 +388,6 @@ func (db *DB) selectFromList(
return nil
}); err != nil {
- db.log.Debug(logs.MetabaseCantIterateOverTheBucket,
- zap.String("error", err.Error()),
- )
-
return
}
}
@@ -447,10 +429,6 @@ func (db *DB) selectObjectID(
default:
fMatch, ok := db.matchers[op]
if !ok {
- db.log.Debug(logs.MetabaseUnknownOperation,
- zap.Uint32("operation", uint32(f.Operation())),
- )
-
return
}
@@ -461,18 +439,13 @@ func (db *DB) selectObjectID(
return
}
- err := fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error {
+ _ = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error {
var id oid.ID
if err := id.Decode(k); err == nil {
appendOID(id)
}
return nil
})
- if err != nil {
- db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets,
- zap.String("error", err.Error()),
- )
- }
}
}
}
From 936ebbb8e5c1b1967e6fd0ec5ec50bd282c1f8bc Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Tue, 8 Oct 2024 18:39:52 +0300
Subject: [PATCH 190/705] [#1423] metabase: Hide `BucketName` from upper levels
Signed-off-by: Anton Nikiforov
---
pkg/local_object_storage/engine/evacuate.go | 12 +-
pkg/local_object_storage/metabase/list.go | 104 ++++++++++--------
.../metabase/list_test.go | 57 ++++++++++
pkg/local_object_storage/shard/list.go | 32 +++---
4 files changed, 143 insertions(+), 62 deletions(-)
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 1baf237f9..c1b9276f3 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -435,7 +435,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
) error {
sh := shardsToEvacuate[shardID]
var cntPrm shard.IterateOverContainersPrm
- cntPrm.Handler = func(ctx context.Context, name []byte, cnt cid.ID) error {
+ cntPrm.Handler = func(ctx context.Context, objType objectSDK.Type, cnt cid.ID) error {
select {
case <-ctx.Done():
return context.Cause(ctx)
@@ -455,8 +455,11 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
skip = e.isNotRepOne(c)
}
if skip {
- countPrm := shard.CountAliveObjectsInBucketPrm{BucketName: name}
- count, err := sh.CountAliveObjectsInBucket(ctx, countPrm)
+ countPrm := shard.CountAliveObjectsInContainerPrm{
+ ObjectType: objType,
+ ContainerID: cnt,
+ }
+ count, err := sh.CountAliveObjectsInContainer(ctx, countPrm)
if err != nil {
return err
}
@@ -464,7 +467,8 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
return nil
}
var objPrm shard.IterateOverObjectsInContainerPrm
- objPrm.BucketName = name
+ objPrm.ObjectType = objType
+ objPrm.ContainerID = cnt
objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error {
select {
case <-ctx.Done():
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index 74a529809..b007ef0da 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -65,21 +65,25 @@ func (l ListRes) Cursor() *Cursor {
// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
type IterateOverContainersPrm struct {
// Handler function executed upon containers in db.
- Handler func(context.Context, []byte, cid.ID) error
+ Handler func(context.Context, objectSDK.Type, cid.ID) error
}
// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
type IterateOverObjectsInContainerPrm struct {
- // BucketName container's bucket name.
- BucketName []byte
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
// Handler function executed upon objects in db.
Handler func(context.Context, *objectcore.Info) error
}
-// CountAliveObjectsInBucketPrm contains parameters for IterateOverObjectsInContainer operation.
-type CountAliveObjectsInBucketPrm struct {
- // BucketName container's bucket name.
- BucketName []byte
+// CountAliveObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
+type CountAliveObjectsInContainerPrm struct {
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
}
// ListWithCursor lists physical objects available in metabase starting from
@@ -319,12 +323,20 @@ func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm Itera
if cidRaw == nil {
continue
}
-
- bktName := make([]byte, len(name))
- copy(bktName, name)
var cnt cid.ID
copy(cnt[:], containerID[:])
- err := prm.Handler(ctx, bktName, cnt)
+ var objType objectSDK.Type
+ switch prefix[0] {
+ case primaryPrefix:
+ objType = objectSDK.TypeRegular
+ case lockersPrefix:
+ objType = objectSDK.TypeLock
+ case tombstonePrefix:
+ objType = objectSDK.TypeTombstone
+ default:
+ continue
+ }
+ err := prm.Handler(ctx, objType, cnt)
if err != nil {
return err
}
@@ -356,22 +368,29 @@ func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOver
return ErrDegradedMode
}
- var containerID cid.ID
- cidRaw, prefix := parseContainerIDWithPrefix(&containerID, prm.BucketName)
- if cidRaw == nil {
- return nil
- }
err := db.boltDB.View(func(tx *bbolt.Tx) error {
- return db.iterateOverObjectsInContainer(ctx, tx, cidRaw, prefix, containerID, prm)
+ return db.iterateOverObjectsInContainer(ctx, tx, prm)
})
success = err == nil
return metaerr.Wrap(err)
}
-func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, cidRaw []byte, prefix byte,
- containerID cid.ID, prm IterateOverObjectsInContainerPrm,
-) error {
- bkt := tx.Bucket(prm.BucketName)
+func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, prm IterateOverObjectsInContainerPrm) error {
+ var prefix byte
+ switch prm.ObjectType {
+ case objectSDK.TypeRegular:
+ prefix = primaryPrefix
+ case objectSDK.TypeLock:
+ prefix = lockersPrefix
+ case objectSDK.TypeTombstone:
+ prefix = tombstonePrefix
+ default:
+ return nil
+ }
+ bucketName := []byte{prefix}
+ bucketName = append(bucketName, prm.ContainerID[:]...)
+
+ bkt := tx.Bucket(bucketName)
if bkt == nil {
return nil
}
@@ -380,32 +399,19 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c
c := bkt.Cursor()
k, v := c.First()
- var objType objectSDK.Type
-
- switch prefix {
- case primaryPrefix:
- objType = objectSDK.TypeRegular
- case lockersPrefix:
- objType = objectSDK.TypeLock
- case tombstonePrefix:
- objType = objectSDK.TypeTombstone
- default:
- return nil
- }
-
for ; k != nil; k, v = c.Next() {
var obj oid.ID
if err := obj.Decode(k); err != nil {
break
}
- if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
+ if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
continue
}
var isLinkingObj bool
var ecInfo *objectcore.ECInfo
- if objType == objectSDK.TypeRegular {
+ if prm.ObjectType == objectSDK.TypeRegular {
var o objectSDK.Object
if err := o.Unmarshal(v); err != nil {
return err
@@ -422,9 +428,9 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c
}
var a oid.Address
- a.SetContainer(containerID)
+ a.SetContainer(prm.ContainerID)
a.SetObject(obj)
- objInfo := objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}
+ objInfo := objectcore.Info{Address: a, Type: prm.ObjectType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}
err := prm.Handler(ctx, &objInfo)
if err != nil {
return err
@@ -433,8 +439,8 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c
return nil
}
-// CountAliveObjectsInBucket count objects in bucket which aren't in graveyard or garbage.
-func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) {
+// CountAliveObjectsInContainer count objects in bucket which aren't in graveyard or garbage.
+func (db *DB) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
var (
startedAt = time.Now()
success = false
@@ -452,14 +458,22 @@ func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjec
return 0, ErrDegradedMode
}
- if len(prm.BucketName) != bucketKeySize {
+ var prefix byte
+ switch prm.ObjectType {
+ case objectSDK.TypeRegular:
+ prefix = primaryPrefix
+ case objectSDK.TypeLock:
+ prefix = lockersPrefix
+ case objectSDK.TypeTombstone:
+ prefix = tombstonePrefix
+ default:
return 0, nil
}
-
- cidRaw := prm.BucketName[1:bucketKeySize]
+ bucketName := []byte{prefix}
+ bucketName = append(bucketName, prm.ContainerID[:]...)
var count uint64
err := db.boltDB.View(func(tx *bbolt.Tx) error {
- bkt := tx.Bucket(prm.BucketName)
+ bkt := tx.Bucket(bucketName)
if bkt == nil {
return nil
}
@@ -468,7 +482,7 @@ func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjec
c := bkt.Cursor()
k, _ := c.First()
for ; k != nil; k, _ = c.Next() {
- if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
+ if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
continue
}
count++
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index bc1726bd6..203802ec0 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -219,3 +220,59 @@ func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]objec
r, err := db.ListWithCursor(context.Background(), listPrm)
return r.AddressList(), r.Cursor(), err
}
+
+func TestIterateOver(t *testing.T) {
+ t.Parallel()
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close()) }()
+
+ const total uint64 = 5
+ for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} {
+ var expected []*objectSDK.Object
+ // fill metabase with objects
+ cid := cidtest.ID()
+ for range total {
+ obj := testutil.GenerateObjectWithCID(cid)
+ obj.SetType(typ)
+ err := metaPut(db, obj, nil)
+ require.NoError(t, err)
+ expected = append(expected, obj)
+ }
+
+ var metaIter meta.IterateOverObjectsInContainerPrm
+ var count uint64
+ metaIter.Handler = func(context.Context, *object.Info) error {
+ count++
+ return nil
+ }
+ metaIter.ContainerID = cid
+ metaIter.ObjectType = typ
+ err := db.IterateOverObjectsInContainer(context.Background(), metaIter)
+ require.NoError(t, err)
+ require.Equal(t, total, count)
+
+ var metaCount meta.CountAliveObjectsInContainerPrm
+ metaCount.ContainerID = cid
+ metaCount.ObjectType = typ
+ res, err := db.CountAliveObjectsInContainer(context.Background(), metaCount)
+ require.NoError(t, err)
+ require.Equal(t, res, total)
+
+ err = metaDelete(db, object.AddressOf(expected[0]), object.AddressOf(expected[1]))
+ require.NoError(t, err)
+
+ res, err = db.CountAliveObjectsInContainer(context.Background(), metaCount)
+ require.NoError(t, err)
+ require.Equal(t, uint64(3), res)
+ }
+ var count int
+ var metaPrm meta.IterateOverContainersPrm
+ metaPrm.Handler = func(context.Context, objectSDK.Type, cidSDK.ID) error {
+ count++
+ return nil
+ }
+ err := db.IterateOverContainers(context.Background(), metaPrm)
+ require.NoError(t, err)
+ require.Equal(t, 3, count)
+}
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index f5d633b77..8d09974b8 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -37,21 +37,25 @@ func (r ListContainersRes) Containers() []cid.ID {
// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
type IterateOverContainersPrm struct {
// Handler function executed upon containers in db.
- Handler func(context.Context, []byte, cid.ID) error
+ Handler func(context.Context, objectSDK.Type, cid.ID) error
}
// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
type IterateOverObjectsInContainerPrm struct {
- // BucketName container's bucket name.
- BucketName []byte
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
// Handler function executed upon objects in db.
Handler func(context.Context, *objectcore.Info) error
}
-// CountAliveObjectsInBucketPrm contains parameters for CountAliveObjectsInBucket operation.
-type CountAliveObjectsInBucketPrm struct {
- // BucketName container's bucket name.
- BucketName []byte
+// CountAliveObjectsInContainerPrm contains parameters for CountAliveObjectsInContainer operation.
+type CountAliveObjectsInContainerPrm struct {
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
}
// ListWithCursorPrm contains parameters for ListWithCursor operation.
@@ -226,7 +230,8 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
}
var metaPrm meta.IterateOverObjectsInContainerPrm
- metaPrm.BucketName = prm.BucketName
+ metaPrm.ContainerID = prm.ContainerID
+ metaPrm.ObjectType = prm.ObjectType
metaPrm.Handler = prm.Handler
err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
if err != nil {
@@ -236,8 +241,8 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
return nil
}
-// CountAliveObjectsInBucket count objects in bucket which aren't in graveyard or garbage.
-func (s *Shard) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) {
+// CountAliveObjectsInContainer count objects in bucket which aren't in graveyard or garbage.
+func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
_, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInBucket")
defer span.End()
@@ -248,9 +253,10 @@ func (s *Shard) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObj
return 0, ErrDegradedMode
}
- var metaPrm meta.CountAliveObjectsInBucketPrm
- metaPrm.BucketName = prm.BucketName
- count, err := s.metaBase.CountAliveObjectsInBucket(ctx, metaPrm)
+ var metaPrm meta.CountAliveObjectsInContainerPrm
+ metaPrm.ObjectType = prm.ObjectType
+ metaPrm.ContainerID = prm.ContainerID
+ count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm)
if err != nil {
return 0, fmt.Errorf("could not count alive objects in bucket: %w", err)
}
From 4190fba86d575ba6c05aeb919ae5bc30572ecbd2 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 8 Oct 2024 16:44:34 +0300
Subject: [PATCH 191/705] [#1425] Remove SetEACL-related code
Signed-off-by: Evgenii Stratonikov
---
.../modules/morph/container/container.go | 8 --
internal/logs/logs.go | 3 -
.../processors/container/handlers.go | 4 -
.../processors/container/processor.go | 10 ---
pkg/morph/client/container/client.go | 1 -
pkg/morph/event/container/eacl.go | 51 -------------
pkg/morph/event/container/eacl_notary.go | 75 -------------------
pkg/services/container/morph/executor_test.go | 4 -
8 files changed, 156 deletions(-)
delete mode 100644 pkg/morph/event/container/eacl.go
delete mode 100644 pkg/morph/event/container/eacl_notary.go
diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go
index a66438975..e280bc634 100644
--- a/cmd/frostfs-adm/internal/modules/morph/container/container.go
+++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go
@@ -159,9 +159,6 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
if err != nil {
return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
}
- if len(ea.Value) != 0 {
- cnt.EACL = ea
- }
return cnt, nil
}
@@ -258,10 +255,6 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
func putContainer(bw *io.BufBinWriter, ch util.Uint160, cnt Container) {
emit.AppCall(bw.BinWriter, ch, "put", callflag.All,
cnt.Value, cnt.Signature, cnt.PublicKey, cnt.Token)
- if ea := cnt.EACL; ea != nil {
- emit.AppCall(bw.BinWriter, ch, "setEACL", callflag.All,
- ea.Value, ea.Signature, ea.PublicKey, ea.Token)
- }
}
func isContainerRestored(cmd *cobra.Command, wCtx *helper.InitializeContext, containerHash util.Uint160, bw *io.BufBinWriter, hashValue util.Uint256) (bool, error) {
@@ -322,7 +315,6 @@ type Container struct {
Signature []byte `json:"signature"`
PublicKey []byte `json:"public_key"`
Token []byte `json:"token"`
- EACL *EACL `json:"eacl"`
}
// EACL represents extended ACL struct in contract storage.
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 84bd023f1..ca783a39d 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -308,9 +308,6 @@ const (
ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete"
ContainerDeleteContainerCheckFailed = "delete container check failed"
ContainerCouldNotApproveDeleteContainer = "could not approve delete container"
- ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL"
- ContainerSetEACLCheckFailed = "set EACL check failed"
- ContainerCouldNotApproveSetEACL = "could not approve set EACL"
FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config"
FrostFSCantRelaySetConfigEvent = "can't relay set config event"
FrostFSFrostfsWorkerPool = "frostfs worker pool"
diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go
index 8bb89abe2..a54f3c772 100644
--- a/pkg/innerring/processors/container/handlers.go
+++ b/pkg/innerring/processors/container/handlers.go
@@ -48,7 +48,3 @@ func (cp *Processor) handleDelete(ev event.Event) {
zap.Int("capacity", cp.pool.Cap()))
}
}
-
-func (cp *Processor) handleSetEACL(_ event.Event) {
- cp.log.Warn(logs.SkipDeprecatedNotification, zap.String("type", "set EACL"))
-}
diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go
index 8fd9edfb8..a6fbdc707 100644
--- a/pkg/innerring/processors/container/processor.go
+++ b/pkg/innerring/processors/container/processor.go
@@ -157,11 +157,6 @@ func (cp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
p.SetParser(containerEvent.ParseDeleteNotary)
pp = append(pp, p)
- // set EACL
- p.SetRequestType(containerEvent.SetEACLNotaryEvent)
- p.SetParser(containerEvent.ParseSetEACLNotary)
- pp = append(pp, p)
-
return pp
}
@@ -190,10 +185,5 @@ func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
h.SetHandler(cp.handleDelete)
hh = append(hh, h)
- // set eACL
- h.SetRequestType(containerEvent.SetEACLNotaryEvent)
- h.SetHandler(cp.handleSetEACL)
- hh = append(hh, h)
-
return hh
}
diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go
index 9dd3a337b..fc892aafb 100644
--- a/pkg/morph/client/container/client.go
+++ b/pkg/morph/client/container/client.go
@@ -28,7 +28,6 @@ const (
listMethod = "list"
containersOfMethod = "containersOf"
eaclMethod = "eACL"
- setEACLMethod = "setEACL"
deletionInfoMethod = "deletionInfo"
startEstimationMethod = "startContainerEstimation"
diff --git a/pkg/morph/event/container/eacl.go b/pkg/morph/event/container/eacl.go
deleted file mode 100644
index 4168d8842..000000000
--- a/pkg/morph/event/container/eacl.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package container
-
-import (
- "github.com/nspcc-dev/neo-go/pkg/network/payload"
-)
-
-// SetEACL represents structure of notification about
-// modified eACL table coming from FrostFS Container contract.
-type SetEACL struct {
- TableValue []byte
- SignatureValue []byte
- PublicKeyValue []byte
- TokenValue []byte
-
- // For notary notifications only.
- // Contains raw transactions of notary request.
- NotaryRequestValue *payload.P2PNotaryRequest
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (SetEACL) MorphEvent() {}
-
-// Table returns returns eACL table in a binary FrostFS API format.
-func (x SetEACL) Table() []byte {
- return x.TableValue
-}
-
-// Signature returns signature of the binary table.
-func (x SetEACL) Signature() []byte {
- return x.SignatureValue
-}
-
-// PublicKey returns public keys of container
-// owner in a binary format.
-func (x SetEACL) PublicKey() []byte {
- return x.PublicKeyValue
-}
-
-// SessionToken returns binary token of the session
-// within which the eACL was set.
-func (x SetEACL) SessionToken() []byte {
- return x.TokenValue
-}
-
-// NotaryRequest returns raw notary request if notification
-// was received via notary service. Otherwise, returns nil.
-func (x SetEACL) NotaryRequest() *payload.P2PNotaryRequest {
- return x.NotaryRequestValue
-}
-
-const expectedItemNumEACL = 4
diff --git a/pkg/morph/event/container/eacl_notary.go b/pkg/morph/event/container/eacl_notary.go
deleted file mode 100644
index a4fe7c966..000000000
--- a/pkg/morph/event/container/eacl_notary.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package container
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
-)
-
-func (x *SetEACL) setTable(v []byte) {
- if v != nil {
- x.TableValue = v
- }
-}
-
-func (x *SetEACL) setSignature(v []byte) {
- if v != nil {
- x.SignatureValue = v
- }
-}
-
-func (x *SetEACL) setPublicKey(v []byte) {
- if v != nil {
- x.PublicKeyValue = v
- }
-}
-
-func (x *SetEACL) setToken(v []byte) {
- if v != nil {
- x.TokenValue = v
- }
-}
-
-var setEACLFieldSetters = []func(*SetEACL, []byte){
- // order on stack is reversed
- (*SetEACL).setToken,
- (*SetEACL).setPublicKey,
- (*SetEACL).setSignature,
- (*SetEACL).setTable,
-}
-
-const (
- // SetEACLNotaryEvent is method name for container EACL operations
- // in `Container` contract. Is used as identificator for notary
- // EACL changing requests.
- SetEACLNotaryEvent = "setEACL"
-)
-
-// ParseSetEACLNotary from NotaryEvent into container event structure.
-func ParseSetEACLNotary(ne event.NotaryEvent) (event.Event, error) {
- var (
- ev SetEACL
- currentOp opcode.Opcode
- )
-
- fieldNum := 0
-
- for _, op := range ne.Params() {
- currentOp = op.Code()
-
- switch {
- case opcode.PUSHDATA1 <= currentOp && currentOp <= opcode.PUSHDATA4:
- if fieldNum == expectedItemNumEACL {
- return nil, event.UnexpectedArgNumErr(SetEACLNotaryEvent)
- }
-
- setEACLFieldSetters[fieldNum](&ev, op.Param())
- fieldNum++
- default:
- return nil, event.UnexpectedOpcode(SetEACLNotaryEvent, op.Code())
- }
- }
-
- ev.NotaryRequestValue = ne.Raw()
-
- return ev, nil
-}
diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go
index 560c69232..c64310eb3 100644
--- a/pkg/services/container/morph/executor_test.go
+++ b/pkg/services/container/morph/executor_test.go
@@ -32,10 +32,6 @@ func (m mock) Delete(_ containerCore.RemovalWitness) error {
return nil
}
-func (m mock) PutEACL(_ containerCore.EACL) error {
- return nil
-}
-
func TestInvalidToken(t *testing.T) {
m := mock{}
e := containerSvcMorph.NewExecutor(m, m)
From cc5360a57851e27dd51d72cf0ff5fa6ac44aba2f Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 8 Oct 2024 16:46:41 +0300
Subject: [PATCH 192/705] [#1425] morph/event: Rename eacl_test.go
Signed-off-by: Evgenii Stratonikov
---
pkg/morph/event/container/{eacl_test.go => util_test.go} | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename pkg/morph/event/container/{eacl_test.go => util_test.go} (100%)
diff --git a/pkg/morph/event/container/eacl_test.go b/pkg/morph/event/container/util_test.go
similarity index 100%
rename from pkg/morph/event/container/eacl_test.go
rename to pkg/morph/event/container/util_test.go
From 94302235d03ad147c9d42791de39f75aaa7fe7f9 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 9 Oct 2024 10:03:58 +0300
Subject: [PATCH 193/705] [#1425] adm: Remove eACL fetching from
dump-containers
Signed-off-by: Evgenii Stratonikov
---
.../modules/morph/container/container.go | 62 +------------------
1 file changed, 2 insertions(+), 60 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go
index e280bc634..6f08d1655 100644
--- a/cmd/frostfs-adm/internal/modules/morph/container/container.go
+++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go
@@ -139,13 +139,12 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invoker, id []byte) (*Container, error) {
bw.Reset()
emit.AppCall(bw.BinWriter, ch, "get", callflag.All, id)
- emit.AppCall(bw.BinWriter, ch, "eACL", callflag.All, id)
res, err := inv.Run(bw.Bytes())
if err != nil {
return nil, fmt.Errorf("can't get container info: %w", err)
}
- if len(res.Stack) != 2 {
- return nil, fmt.Errorf("%w: expected 2 items on stack", errInvalidContainerResponse)
+ if len(res.Stack) != 1 {
+ return nil, fmt.Errorf("%w: expected 1 items on stack", errInvalidContainerResponse)
}
cnt := new(Container)
@@ -154,11 +153,6 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
}
- ea := new(EACL)
- err = ea.FromStackItem(res.Stack[1])
- if err != nil {
- return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
- }
return cnt, nil
}
@@ -317,14 +311,6 @@ type Container struct {
Token []byte `json:"token"`
}
-// EACL represents extended ACL struct in contract storage.
-type EACL struct {
- Value []byte `json:"value"`
- Signature []byte `json:"signature"`
- PublicKey []byte `json:"public_key"`
- Token []byte `json:"token"`
-}
-
// ToStackItem implements stackitem.Convertible.
func (c *Container) ToStackItem() (stackitem.Item, error) {
return stackitem.NewStruct([]stackitem.Item{
@@ -369,50 +355,6 @@ func (c *Container) FromStackItem(item stackitem.Item) error {
return nil
}
-// ToStackItem implements stackitem.Convertible.
-func (c *EACL) ToStackItem() (stackitem.Item, error) {
- return stackitem.NewStruct([]stackitem.Item{
- stackitem.NewByteArray(c.Value),
- stackitem.NewByteArray(c.Signature),
- stackitem.NewByteArray(c.PublicKey),
- stackitem.NewByteArray(c.Token),
- }), nil
-}
-
-// FromStackItem implements stackitem.Convertible.
-func (c *EACL) FromStackItem(item stackitem.Item) error {
- arr, ok := item.Value().([]stackitem.Item)
- if !ok || len(arr) != 4 {
- return errors.New("invalid stack item type")
- }
-
- value, err := arr[0].TryBytes()
- if err != nil {
- return errors.New("invalid eACL value")
- }
-
- sig, err := arr[1].TryBytes()
- if err != nil {
- return errors.New("invalid eACL signature")
- }
-
- pub, err := arr[2].TryBytes()
- if err != nil {
- return errors.New("invalid eACL public key")
- }
-
- tok, err := arr[3].TryBytes()
- if err != nil {
- return errors.New("invalid eACL token")
- }
-
- c.Value = value
- c.Signature = sig
- c.PublicKey = pub
- c.Token = tok
- return nil
-}
-
// getCIDFilterFunc returns filtering function for container IDs.
// Raw byte slices are used because it works with structures returned
// from contract.
From 02bb7159a54a9522ad0bc97d1a5456f5cfc425e4 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 9 Oct 2024 10:50:30 +0300
Subject: [PATCH 194/705] [#1425] services/tree: Remove eACL processing
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-node/tree.go | 1 -
pkg/services/tree/options.go | 9 --
pkg/services/tree/signature.go | 137 +------------------------
pkg/services/tree/signature_test.go | 151 ++++++++++++++++++----------
4 files changed, 100 insertions(+), 198 deletions(-)
diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go
index d22e510de..192f08471 100644
--- a/cmd/frostfs-node/tree.go
+++ b/cmd/frostfs-node/tree.go
@@ -54,7 +54,6 @@ func initTreeService(c *cfg) {
cli: c.shared.cnrClient,
}),
tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient),
- tree.WithEACLSource(c.cfgObject.eaclSource),
tree.WithNetmapSource(c.netMapSource),
tree.WithPrivateKey(&c.key.PrivateKey),
tree.WithLogger(c.log),
diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go
index 6a20fe5cc..1db5607f6 100644
--- a/pkg/services/tree/options.go
+++ b/pkg/services/tree/options.go
@@ -33,7 +33,6 @@ type cfg struct {
nmSource netmap.Source
cnrSource ContainerSource
frostfsidSubjectProvider frostfsidcore.SubjectProvider
- eaclSource container.EACLSource
forest pilorama.Forest
// replication-related parameters
replicatorChannelCapacity int
@@ -65,14 +64,6 @@ func WithFrostfsidSubjectProvider(provider frostfsidcore.SubjectProvider) Option
}
}
-// WithEACLSource sets a eACL table source for a tree service.
-// This option is required.
-func WithEACLSource(src container.EACLSource) Option {
- return func(c *cfg) {
- c.eaclSource = src
- }
-}
-
// WithNetmapSource sets a netmap source for a tree service.
// This option is required.
func WithNetmapSource(src netmap.Source) Option {
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 58cab659f..305adf2d7 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -9,10 +9,8 @@ import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
@@ -20,7 +18,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "go.uber.org/zap"
)
type message interface {
@@ -30,16 +27,11 @@ type message interface {
SetSignature(*Signature)
}
-func basicACLErr(op acl.Op) error {
- return fmt.Errorf("access to operation %s is denied by basic ACL check", op)
-}
-
func eACLErr(op eacl.Operation, err error) error {
return fmt.Errorf("access to operation %s is denied by extended ACL check: %w", op, err)
}
var (
- errBearerWrongOwner = errors.New("bearer token must be signed by the container owner")
errBearerWrongContainer = errors.New("bearer token is created for another container")
errBearerSignature = errors.New("invalid bearer token signature")
)
@@ -77,56 +69,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return fmt.Errorf("can't get request role: %w", err)
}
- basicACL := cnr.Value.BasicACL()
- // Basic ACL mask can be unset, if a container operations are performed
- // with strict APE checks only.
- //
- // FIXME(@aarifullin): tree service temporiraly performs APE checks on
- // object verbs, because tree verbs have not been introduced yet.
- if basicACL == 0x0 {
- return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey)
- }
-
- if !basicACL.IsOpAllowed(op, role) {
- return basicACLErr(op)
- }
-
- if !basicACL.Extendable() {
- return nil
- }
-
- var useBearer bool
- if len(rawBearer) != 0 {
- if !basicACL.AllowedBearerRules(op) {
- s.log.Debug(logs.TreeBearerPresentedButNotAllowedByACL,
- zap.String("cid", cid.EncodeToString()),
- zap.Stringer("op", op),
- )
- } else {
- useBearer = true
- }
- }
-
- var tb eacl.Table
- signer := req.GetSignature().GetKey()
- if useBearer && !bt.Impersonate() {
- if !bearer.ResolveIssuer(*bt).Equals(cnr.Value.Owner()) {
- return eACLErr(eaclOp, errBearerWrongOwner)
- }
- tb = bt.EACLTable()
- } else {
- tbCore, err := s.eaclSource.GetEACL(cid)
- if err != nil {
- return handleGetEACLError(err)
- }
- tb = *tbCore.Value
-
- if useBearer && bt.Impersonate() {
- signer = bt.SigningKeyBytes()
- }
- }
-
- return checkEACL(tb, signer, eACLRole(role), eaclOp)
+ return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey)
}
// Returns true iff the operation is read-only and request was signed
@@ -168,14 +111,6 @@ func parseBearer(rawBearer []byte, cid cidSDK.ID, eaclOp eacl.Operation) (*beare
return bt, nil
}
-func handleGetEACLError(err error) error {
- if client.IsErrEACLNotFound(err) {
- return nil
- }
-
- return fmt.Errorf("get eACL table: %w", err)
-}
-
func verifyMessage(m message) error {
binBody, err := m.ReadSignedData(nil)
if err != nil {
@@ -260,73 +195,3 @@ func eACLOp(op acl.Op) eacl.Operation {
panic(fmt.Sprintf("unexpected tree service ACL operation: %s", op))
}
}
-
-func eACLRole(role acl.Role) eacl.Role {
- switch role {
- case acl.RoleOwner:
- return eacl.RoleUser
- case acl.RoleOthers:
- return eacl.RoleOthers
- default:
- panic(fmt.Sprintf("unexpected tree service ACL role: %s", role))
- }
-}
-
-var (
- errDENY = errors.New("DENY eACL rule")
- errNoAllowRules = errors.New("not found allowing rules for the request")
-)
-
-// checkEACL searches for the eACL rules that could be applied to the request
-// (a tuple of a signer key, his FrostFS role and a request operation).
-// It does not filter the request by the filters of the eACL table since tree
-// requests do not contain any "object" information that could be filtered and,
-// therefore, filtering leads to unexpected results.
-// The code was copied with the minor updates from the SDK repo:
-// https://github.com/nspcc-dev/frostfs-sdk-go/blob/43a57d42dd50dc60465bfd3482f7f12bcfcf3411/eacl/validator.go#L28.
-func checkEACL(tb eacl.Table, signer []byte, role eacl.Role, op eacl.Operation) error {
- for _, record := range tb.Records() {
- // check type of operation
- if record.Operation() != op {
- continue
- }
-
- // check target
- if !targetMatches(record, role, signer) {
- continue
- }
-
- switch a := record.Action(); a {
- case eacl.ActionAllow:
- return nil
- case eacl.ActionDeny:
- return eACLErr(op, errDENY)
- default:
- return eACLErr(op, fmt.Errorf("unexpected action: %s", a))
- }
- }
-
- return eACLErr(op, errNoAllowRules)
-}
-
-func targetMatches(rec eacl.Record, role eacl.Role, signer []byte) bool {
- for _, target := range rec.Targets() {
- // check public key match
- if pubs := target.BinaryKeys(); len(pubs) != 0 {
- for _, key := range pubs {
- if bytes.Equal(key, signer) {
- return true
- }
- }
-
- continue
- }
-
- // check target group match
- if role == target.Role() {
- return true
- }
- }
-
- return false
-}
diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go
index 3c3ebfe89..939ff170d 100644
--- a/pkg/services/tree/signature_test.go
+++ b/pkg/services/tree/signature_test.go
@@ -4,22 +4,30 @@ import (
"context"
"crypto/ecdsa"
"crypto/sha256"
+ "encoding/hex"
"errors"
"testing"
aclV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
+ "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/stretchr/testify/require"
)
@@ -27,6 +35,34 @@ type dummyNetmapSource struct {
netmap.Source
}
+type dummySubjectProvider struct {
+ subjects map[util.Uint160]client.SubjectExtended
+}
+
+func (s dummySubjectProvider) GetSubject(addr util.Uint160) (*client.Subject, error) {
+ res := s.subjects[addr]
+ return &client.Subject{
+ PrimaryKey: res.PrimaryKey,
+ AdditionalKeys: res.AdditionalKeys,
+ Namespace: res.Namespace,
+ Name: res.Name,
+ KV: res.KV,
+ }, nil
+}
+
+func (s dummySubjectProvider) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
+ res := s.subjects[addr]
+ return &res, nil
+}
+
+type dummyEpochSource struct {
+ epoch uint64
+}
+
+func (s dummyEpochSource) CurrentEpoch() uint64 {
+ return s.epoch
+}
+
type dummyContainerSource map[string]*containercore.Container
func (s dummyContainerSource) List() ([]cid.ID, error) {
@@ -57,16 +93,6 @@ func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, e
return &containercore.DelInfo{}, nil
}
-type dummyEACLSource map[string]*containercore.EACL
-
-func (s dummyEACLSource) GetEACL(id cid.ID) (*containercore.EACL, error) {
- cntEACL, ok := s[id.String()]
- if !ok {
- return nil, errors.New("container not found")
- }
- return cntEACL, nil
-}
-
func testContainer(owner user.ID) container.Container {
var r netmapSDK.ReplicaDescriptor
r.SetNumberOfObjects(1)
@@ -81,6 +107,8 @@ func testContainer(owner user.ID) container.Container {
return cnt
}
+const currentEpoch = 123
+
func TestMessageSign(t *testing.T) {
privs := make([]*keys.PrivateKey, 4)
for i := range privs {
@@ -99,6 +127,15 @@ func TestMessageSign(t *testing.T) {
Value: testContainer(ownerID),
}
+ e := inmemory.NewInMemoryLocalOverrides()
+ e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.Target{
+ Type: engine.Container,
+ Name: cid1.EncodeToString(),
+ }, testChain(privs[0].PublicKey(), privs[1].PublicKey()))
+ frostfsidProvider := dummySubjectProvider{
+ subjects: make(map[util.Uint160]client.SubjectExtended),
+ }
+
s := &Service{
cfg: cfg{
log: test.NewLogger(t),
@@ -107,12 +144,10 @@ func TestMessageSign(t *testing.T) {
cnrSource: dummyContainerSource{
cid1.String(): cnr,
},
- eaclSource: dummyEACLSource{
- cid1.String(): &containercore.EACL{
- Value: testTable(cid1, privs[0].PublicKey(), privs[1].PublicKey()),
- },
- },
+ frostfsidSubjectProvider: frostfsidProvider,
+ state: dummyEpochSource{epoch: currentEpoch},
},
+ apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}),
}
rawCID1 := make([]byte, sha256.Size)
@@ -235,46 +270,58 @@ func TestMessageSign(t *testing.T) {
func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token {
var b bearer.Token
- b.SetEACLTable(*testTable(cid, forPutGet, forGet))
+ b.SetExp(currentEpoch + 1)
+ b.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid.EncodeToString(),
+ },
+ Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
+ })
return b
}
-func testTable(cid cid.ID, forPutGet, forGet *keys.PublicKey) *eaclSDK.Table {
- tgtGet := eaclSDK.NewTarget()
- tgtGet.SetRole(eaclSDK.RoleUnknown)
- tgtGet.SetBinaryKeys([][]byte{forPutGet.Bytes(), forGet.Bytes()})
-
- rGet := eaclSDK.NewRecord()
- rGet.SetAction(eaclSDK.ActionAllow)
- rGet.SetOperation(eaclSDK.OperationGet)
- rGet.SetTargets(*tgtGet)
-
- tgtPut := eaclSDK.NewTarget()
- tgtPut.SetRole(eaclSDK.RoleUnknown)
- tgtPut.SetBinaryKeys([][]byte{forPutGet.Bytes()})
-
- rPut := eaclSDK.NewRecord()
- rPut.SetAction(eaclSDK.ActionAllow)
- rPut.SetOperation(eaclSDK.OperationPut)
- rPut.SetTargets(*tgtPut)
-
- tb := eaclSDK.NewTable()
- tb.AddRecord(rGet)
- tb.AddRecord(rPut)
-
- tgt := eaclSDK.NewTarget()
- tgt.SetRole(eaclSDK.RoleOthers)
-
- for _, op := range []eaclSDK.Operation{eaclSDK.OperationGet, eaclSDK.OperationPut} {
- r := eaclSDK.NewRecord()
- r.SetAction(eaclSDK.ActionDeny)
- r.SetTargets(*tgt)
- r.SetOperation(op)
- tb.AddRecord(r)
+func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain {
+ ruleGet := chain.Rule{
+ Status: chain.Allow,
+ Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
+ Actions: chain.Actions{Names: []string{native.MethodGetObject}},
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forPutGet.Bytes()),
+ },
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forGet.Bytes()),
+ },
+ },
+ }
+ rulePut := chain.Rule{
+ Status: chain.Allow,
+ Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
+ Actions: chain.Actions{Names: []string{native.MethodPutObject}},
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forPutGet.Bytes()),
+ },
+ },
}
- tb.SetCID(cid)
-
- return tb
+ return &chain.Chain{
+ Rules: []chain.Rule{
+ ruleGet,
+ rulePut,
+ },
+ }
}
From 11347602719a764179a74382076de4430936d7ad Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 9 Oct 2024 10:55:48 +0300
Subject: [PATCH 195/705] [#1425] services/tree: Remove eACL mentions from
bearer token parsing errors
Signed-off-by: Evgenii Stratonikov
---
pkg/services/tree/signature.go | 30 ++++++------------------------
1 file changed, 6 insertions(+), 24 deletions(-)
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 305adf2d7..20a629fcc 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -15,7 +15,6 @@ import (
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
@@ -27,10 +26,6 @@ type message interface {
SetSignature(*Signature)
}
-func eACLErr(op eacl.Operation, err error) error {
- return fmt.Errorf("access to operation %s is denied by extended ACL check: %w", op, err)
-}
-
var (
errBearerWrongContainer = errors.New("bearer token is created for another container")
errBearerSignature = errors.New("invalid bearer token signature")
@@ -57,11 +52,9 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return fmt.Errorf("can't get container %s: %w", cid, err)
}
- eaclOp := eACLOp(op)
-
- bt, err := parseBearer(rawBearer, cid, eaclOp)
+ bt, err := parseBearer(rawBearer, cid)
if err != nil {
- return err
+ return fmt.Errorf("access to operation %s is denied: %w", op, err)
}
role, pubKey, err := roleAndPubKeyFromReq(cnr, req, bt)
@@ -93,20 +86,20 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) {
return false, nil
}
-func parseBearer(rawBearer []byte, cid cidSDK.ID, eaclOp eacl.Operation) (*bearer.Token, error) {
+func parseBearer(rawBearer []byte, cid cidSDK.ID) (*bearer.Token, error) {
if len(rawBearer) == 0 {
return nil, nil
}
bt := new(bearer.Token)
if err := bt.Unmarshal(rawBearer); err != nil {
- return nil, eACLErr(eaclOp, fmt.Errorf("invalid bearer token: %w", err))
+ return nil, fmt.Errorf("invalid bearer token: %w", err)
}
if !bt.AssertContainer(cid) {
- return nil, eACLErr(eaclOp, errBearerWrongContainer)
+ return nil, errBearerWrongContainer
}
if !bt.VerifySignature() {
- return nil, eACLErr(eaclOp, errBearerSignature)
+ return nil, errBearerSignature
}
return bt, nil
}
@@ -184,14 +177,3 @@ func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (a
return role, pub, nil
}
-
-func eACLOp(op acl.Op) eacl.Operation {
- switch op {
- case acl.OpObjectGet:
- return eacl.OperationGet
- case acl.OpObjectPut:
- return eacl.OperationPut
- default:
- panic(fmt.Sprintf("unexpected tree service ACL operation: %s", op))
- }
-}
From dfb00083d07499a0e3d89076cc3f08729c00cb71 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 10 Oct 2024 14:57:39 +0300
Subject: [PATCH 196/705] [#1426] go.mod: Update sdk-go
Signed-off-by: Evgenii Stratonikov
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 91cc55a36..9a64f0e81 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
diff --git a/go.sum b/go.sum
index 728592ea5..777d9b3ab 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa h1:Jr8hXNNFECLhC7S45HuyQms4U/gim1xILoU3g4ZZnHg=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509 h1:5gtEq4bjVgAbTOrbEquspyM3s+qsMtkpGC5m9FtfImk=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
From 5992ee901ae574d536a82bd7f1504852e17fb086 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Fri, 11 Oct 2024 11:33:36 +0300
Subject: [PATCH 197/705] [#1411] go.mod: Bump frostfs-contract to v0.20.0
Signed-off-by: Alexander Chuprov
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 9a64f0e81..1468c12b2 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3
- git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c
+ git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
diff --git a/go.sum b/go.sum
index 777d9b3ab..5ce81807a 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,8 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3 h1:6QXNnfBgYx81UZsBdpPnQY+ZMSKGFbFc29wV7DJ/UG4=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0 h1:8Z5iPhieCrbcdhxBuY/Bajh6V5fki7Whh0b4S2zYJYU=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0/go.mod h1:Y2Xorxc8SBO4phoek7n3XxaPZz5rIrFgDsU4TOjmlGA=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
From 42bf03e5cc04f2b6d67465608b842693171344ba Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Fri, 11 Oct 2024 11:33:56 +0300
Subject: [PATCH 198/705] [#1411] adm/nns: Add 'delRecord'
Signed-off-by: Alexander Chuprov
---
.../internal/modules/morph/nns/record.go | 29 +++++++++++++++++++
.../internal/modules/morph/nns/root.go | 10 +++++++
2 files changed, 39 insertions(+)
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go
index 0e217eb61..66bb1b94f 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go
@@ -47,6 +47,19 @@ func initDelRecordsCmd() {
_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag)
}
+func initDelRecordCmd() {
+ Cmd.AddCommand(delRecordCmd)
+ delRecordCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ delRecordCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
+ delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
+ delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
+
+ _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag)
+ _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag)
+ _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordDataFlag)
+}
+
func addRecord(cmd *cobra.Command, _ []string) {
c, actor, _ := getRPCClient(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
@@ -115,6 +128,22 @@ func delRecords(cmd *cobra.Command, _ []string) {
cmd.Println("Records removed successfully")
}
+func delRecord(cmd *cobra.Command, _ []string) {
+ c, actor, _ := getRPCClient(cmd)
+ name, _ := cmd.Flags().GetString(nnsNameFlag)
+ data, _ := cmd.Flags().GetString(nnsRecordDataFlag)
+ recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
+ typ, err := getRecordType(recordType)
+ commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err)
+ h, vub, err := c.DeleteRecord(name, typ, data)
+ commonCmd.ExitOnErr(cmd, "unable to delete record: %w", err)
+
+ cmd.Println("Waiting for transaction to persist...")
+ _, err = actor.Wait(h, vub, err)
+ commonCmd.ExitOnErr(cmd, "delete records error: %w", err)
+ cmd.Println("Record removed successfully")
+}
+
func getRecordType(recordType string) (*big.Int, error) {
switch strings.ToUpper(recordType) {
case "A":
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
index 56774c292..9bdeaccd9 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
@@ -95,6 +95,15 @@ var (
},
Run: delRecords,
}
+ delRecordCmd = &cobra.Command{
+ Use: "delete-record",
+ Short: "Removes domain record with the specified type and data",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ },
+ Run: delRecord,
+ }
)
func init() {
@@ -106,4 +115,5 @@ func init() {
initAddRecordCmd()
initGetRecordsCmd()
initDelRecordsCmd()
+ initDelRecordCmd()
}
From acd6eb18151d7e2fec413d97e273b13076a9b4bb Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Fri, 11 Oct 2024 15:40:01 +0300
Subject: [PATCH 199/705] [#1427] object: Fix `Put` for `EC` object when node
unavailable
There might be situation when context canceled earlier than traverser move to another part of the nodes.
To avoid this, need to wait for the result from concurrent put at each traverser iteration.
Signed-off-by: Anton Nikiforov
---
pkg/services/object/common/writer/ec.go | 20 +-
pkg/services/object/common/writer/ec_test.go | 191 +++++++++++++++++++
2 files changed, 205 insertions(+), 6 deletions(-)
create mode 100644 pkg/services/object/common/writer/ec_test.go
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
index 6b6a14cc0..dffe52a6d 100644
--- a/pkg/services/object/common/writer/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -197,14 +197,15 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
if err != nil {
return err
}
+ partsProcessed := make([]atomic.Bool, len(parts))
objID, _ := obj.ID()
t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
- eg, egCtx := errgroup.WithContext(ctx)
for {
+ eg, egCtx := errgroup.WithContext(ctx)
nodes := t.Next()
if len(nodes) == 0 {
break
@@ -216,13 +217,20 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
}
for idx := range parts {
- eg.Go(func() error {
- return e.writePart(egCtx, parts[idx], idx, nodes, visited)
- })
- t.SubmitSuccess()
+ if !partsProcessed[idx].Load() {
+ eg.Go(func() error {
+ err := e.writePart(egCtx, parts[idx], idx, nodes, visited)
+ if err == nil {
+ partsProcessed[idx].Store(true)
+ t.SubmitSuccess()
+ }
+ return err
+ })
+ }
}
+ err = eg.Wait()
}
- if err := eg.Wait(); err != nil {
+ if err != nil {
return errIncompletePut{
singleErr: err,
}
diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go
new file mode 100644
index 000000000..32863d678
--- /dev/null
+++ b/pkg/services/object/common/writer/ec_test.go
@@ -0,0 +1,191 @@
+package writer
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "strconv"
+ "testing"
+
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/panjf2000/ants/v2"
+ "github.com/stretchr/testify/require"
+)
+
+type testPlacementBuilder struct {
+ vectors [][]netmap.NodeInfo
+}
+
+func (p *testPlacementBuilder) BuildPlacement(_ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
+ [][]netmap.NodeInfo, error,
+) {
+ arr := make([]netmap.NodeInfo, len(p.vectors[0]))
+ copy(arr, p.vectors[0])
+ return [][]netmap.NodeInfo{arr}, nil
+}
+
+type nmKeys struct{}
+
+func (nmKeys) IsLocalKey(_ []byte) bool {
+ return false
+}
+
+type clientConstructor struct {
+ vectors [][]netmap.NodeInfo
+}
+
+func (c clientConstructor) Get(info client.NodeInfo) (client.MultiAddressClient, error) {
+ if bytes.Equal(info.PublicKey(), c.vectors[0][0].PublicKey()) ||
+ bytes.Equal(info.PublicKey(), c.vectors[0][1].PublicKey()) {
+ return multiAddressClient{err: errors.New("node unavailable")}, nil
+ }
+ return multiAddressClient{}, nil
+}
+
+type multiAddressClient struct {
+ client.MultiAddressClient
+ err error
+}
+
+func (c multiAddressClient) ObjectPutSingle(_ context.Context, _ apiclient.PrmObjectPutSingle) (*apiclient.ResObjectPutSingle, error) {
+ if c.err != nil {
+ return nil, c.err
+ }
+ return &apiclient.ResObjectPutSingle{}, nil
+}
+
+func (c multiAddressClient) ReportError(error) {
+}
+
+func (multiAddressClient) RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error {
+ return nil
+}
+
+func TestECWriter(t *testing.T) {
+ // Create container with policy EC 1.1
+ cnr := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetECDataCount(1)
+ x1.SetECParityCount(1)
+ p1.AddReplicas(x1)
+ cnr.SetPlacementPolicy(p1)
+ cnr.SetAttribute("cnr", "cnr1")
+
+ cid := cidtest.ID()
+
+ // Create 4 nodes, 2 nodes for chunks,
+ // 2 nodes for the case when the first two will fail.
+ ns, _ := testNodeMatrix(t, []int{4})
+
+ data := make([]byte, 100)
+ _, _ = rand.Read(data)
+ ver := version.Current()
+
+ var csum checksum.Checksum
+ csum.SetSHA256(sha256.Sum256(data))
+
+ var csumTZ checksum.Checksum
+ csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+ obj := objectSDK.New()
+ obj.SetID(oidtest.ID())
+ obj.SetOwnerID(usertest.ID())
+ obj.SetContainerID(cid)
+ obj.SetVersion(&ver)
+ obj.SetPayload(data)
+ obj.SetPayloadSize(uint64(len(data)))
+ obj.SetPayloadChecksum(csum)
+ obj.SetPayloadHomomorphicHash(csumTZ)
+
+ // Builder return nodes without sort by hrw
+ builder := &testPlacementBuilder{
+ vectors: ns,
+ }
+
+ ownerKey, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ pool, err := ants.NewPool(4, ants.WithNonblocking(true))
+ require.NoError(t, err)
+
+ log, err := logger.NewLogger(nil)
+ require.NoError(t, err)
+
+ var n nmKeys
+ ecw := ECWriter{
+ Config: &Config{
+ NetmapKeys: n,
+ RemotePool: pool,
+ Logger: log,
+ ClientConstructor: clientConstructor{vectors: ns},
+ },
+ PlacementOpts: append(
+ []placement.Option{placement.UseBuilder(builder), placement.ForContainer(cnr)},
+ placement.WithCopyNumbers(nil)), // copies number ignored for EC
+ Container: cnr,
+ Key: &ownerKey.PrivateKey,
+ Relay: nil,
+ ObjectMetaValid: true,
+ }
+
+ err = ecw.WriteObject(context.Background(), obj)
+ require.NoError(t, err)
+}
+
+func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
+ mNodes := make([][]netmap.NodeInfo, len(dim))
+ mAddr := make([][]string, len(dim))
+
+ for i := range dim {
+ ns := make([]netmap.NodeInfo, dim[i])
+ as := make([]string, dim[i])
+
+ for j := range dim[i] {
+ a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
+ strconv.Itoa(i),
+ strconv.Itoa(60000+j),
+ )
+
+ var ni netmap.NodeInfo
+ ni.SetNetworkEndpoints(a)
+ ni.SetPublicKey([]byte(a))
+
+ var na network.AddressGroup
+
+ err := na.FromIterator(netmapcore.Node(ni))
+ require.NoError(t, err)
+
+ as[j] = network.StringifyGroup(na)
+
+ ns[j] = ni
+ }
+
+ mNodes[i] = ns
+ mAddr[i] = as
+ }
+
+ return mNodes, mAddr
+}
From d2a59b2de8572952df34e2b66c3bf51d03ce13d9 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 14 Oct 2024 15:51:37 +0300
Subject: [PATCH 200/705] [#1429] lens/explorer: Fix locked object records
display text
Display texts for a locked object and a list of it lockers were mistakenly swapped.
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-lens/internal/schema/metabase/records/string.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
index a6c70d537..ec0ab8e1a 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/string.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
@@ -38,7 +38,7 @@ func (r *ContainerVolumeRecord) String() string {
func (r *LockedRecord) String() string {
return fmt.Sprintf(
- "Locker OID %s %c Locked [%d]OID {...}",
+ "Object OID %s %c Lockers [%d]OID {...}",
common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
tview.Borders.Vertical,
len(r.ids),
From 714ff784fa460767e82527b71fd520932b0256ed Mon Sep 17 00:00:00 2001
From: Vitaliy Potyarkin
Date: Mon, 14 Oct 2024 17:31:26 +0300
Subject: [PATCH 201/705] [#1431] objsvc: Use specific values in message about
address mismatch
This makes troubleshooting failed operations much easier
Signed-off-by: Vitaliy Potyarkin
---
pkg/services/object/common/target/target.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
index a2d6b4d39..9e0f49297 100644
--- a/pkg/services/object/common/target/target.go
+++ b/pkg/services/object/common/target/target.go
@@ -86,7 +86,7 @@ func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter
user.IDFromKey(&ownerSession, key.PublicKey)
if !ownerObj.Equals(ownerSession) {
- return nil, errors.New("session token is missing but object owner id is different from the default key")
+ return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession)
}
} else {
if !ownerObj.Equals(sessionInfo.Owner) {
From 3012286452e8b2bb04a6ad9b70e364b00b29919f Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 17:10:03 +0300
Subject: [PATCH 202/705] [#1431] metabase: Fix unreachable code
Signed-off-by: Alexander Chuprov
---
pkg/local_object_storage/metabase/delete.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 3add1f268..b5ac22017 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -453,7 +453,7 @@ func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
func hasAnyItem(b *bbolt.Bucket) bool {
var hasAnyItem bool
c := b.Cursor()
- for k, _ := c.First(); k != nil; k, _ = c.Next() {
+ for k, _ := c.First(); k != nil; {
hasAnyItem = true
break
}
From d53732f663ce46ff29196782b5d836a37c1f6c7d Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 17:17:00 +0300
Subject: [PATCH 203/705] [#1431] engine: Delete always false condition
Signed-off-by: Alexander Chuprov
---
pkg/local_object_storage/engine/evacuate.go | 3 ---
1 file changed, 3 deletions(-)
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index c1b9276f3..940e30323 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -56,9 +56,6 @@ func (s EvacuateScope) String() string {
var sb strings.Builder
first := true
if s&EvacuateScopeObjects == EvacuateScopeObjects {
- if !first {
- sb.WriteString(";")
- }
sb.WriteString("objects")
first = false
}
From 63466d71b22d8b23eff4d0fc1eea5a16d36a138b Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 17:20:14 +0300
Subject: [PATCH 204/705] [#1431] engine: Delete unused constants
Signed-off-by: Alexander Chuprov
---
cmd/frostfs-cli/modules/ape_manager/add_chain.go | 9 ++++-----
internal/logs/logs.go | 14 --------------
pkg/morph/client/container/client.go | 1 -
3 files changed, 4 insertions(+), 20 deletions(-)
diff --git a/cmd/frostfs-cli/modules/ape_manager/add_chain.go b/cmd/frostfs-cli/modules/ape_manager/add_chain.go
index c6622da25..a85f3c93e 100644
--- a/cmd/frostfs-cli/modules/ape_manager/add_chain.go
+++ b/cmd/frostfs-cli/modules/ape_manager/add_chain.go
@@ -31,11 +31,10 @@ const (
)
const (
- defaultNamespace = ""
- namespaceTarget = "namespace"
- containerTarget = "container"
- userTarget = "user"
- groupTarget = "group"
+ namespaceTarget = "namespace"
+ containerTarget = "container"
+ userTarget = "user"
+ groupTarget = "group"
)
var errUnknownTargetType = errors.New("unknown target type")
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index ca783a39d..b4bc31b0c 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -14,8 +14,6 @@ const (
InterruptPlacementIterationByContext = "interrupt placement iteration by context"
Notification = "notification"
-
- SkipDeprecatedNotification = "skip deprecated notification"
)
const (
@@ -41,8 +39,6 @@ const (
InnerringCantUpdatePersistentState = "can't update persistent state"
InnerringCloserError = "closer error"
InnerringReadConfigFromBlockchain = "read config from blockchain"
- NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications"
- NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification"
PolicerCouldNotGetContainer = "could not get container"
PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal"
PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container"
@@ -61,7 +57,6 @@ const (
ReplicatorCouldNotReplicateObject = "could not replicate object"
ReplicatorObjectSuccessfullyReplicated = "object successfully replicated"
TreeRedirectingTreeServiceQuery = "redirecting tree service query"
- TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL"
TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree"
TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree"
TreeSynchronizeTree = "synchronize tree"
@@ -107,7 +102,6 @@ const (
GetUnableToGetAllPartsECObject = "unable to get all parts, continue to reconstruct with existed"
GetUnableToGetPartECObject = "unable to get part of the erasure-encoded object"
GetUnableToHeadPartECObject = "unable to head part of the erasure-encoded object"
- GetUnableToGetECObjectContainer = "unable to get container for erasure-coded object"
GetUnableToHeadPartsECObject = "unable to head parts of the erasure-encoded object"
GetAssemblingSplittedObjectCompleted = "assembling splitted object completed"
GetAssemblingECObjectCompleted = "assembling erasure-coded object completed"
@@ -271,9 +265,7 @@ const (
ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
- WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache"
WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
- WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database"
WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza"
@@ -413,11 +405,6 @@ const (
FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
- FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers"
- FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container"
- FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object"
- FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications"
- FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification"
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage"
FrostFSNodeFailedInitTracing = "failed init tracing"
@@ -461,7 +448,6 @@ const (
FSTreeCantUnmarshalObject = "can't unmarshal an object"
FSTreeCantFushObjectBlobstor = "can't flush an object to blobstor"
FSTreeCantUpdateID = "can't update object storage ID"
- FSTreeCantDecodeDBObjectAddress = "can't decode object address from the DB"
PutSingleRedirectFailure = "failed to redirect PutSingle request"
StorageIDRetrievalFailure = "can't get storage ID from metabase"
ObjectRemovalFailureBlobStor = "can't remove object from blobStor"
diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go
index fc892aafb..f735a5ff7 100644
--- a/pkg/morph/client/container/client.go
+++ b/pkg/morph/client/container/client.go
@@ -33,7 +33,6 @@ const (
startEstimationMethod = "startContainerEstimation"
stopEstimationMethod = "stopContainerEstimation"
- putSizeMethod = "putContainerSize"
listSizesMethod = "listContainerSizes"
getSizeMethod = "getContainerSize"
From 00b1cecfb7486aac93e8806caa6563fe75eabc1b Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 17:25:00 +0300
Subject: [PATCH 205/705] [#1431] obj_storage/shard: Fix visibility of
'newMetricStore'
Signed-off-by: Alexander Chuprov
---
pkg/local_object_storage/shard/control_test.go | 4 ++--
pkg/local_object_storage/shard/metrics_test.go | 4 ++--
pkg/local_object_storage/shard/reload_test.go | 2 +-
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index 44fee1636..6b9eaa550 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -126,7 +126,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
}),
}
- mm := NewMetricStore()
+ mm := newMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
@@ -190,7 +190,7 @@ func TestRefillMetabase(t *testing.T) {
}),
}
- mm := NewMetricStore()
+ mm := newMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 56622326a..cec5a12ad 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -35,7 +35,7 @@ type metricsStore struct {
refillStatus string
}
-func NewMetricStore() *metricsStore {
+func newMetricStore() *metricsStore {
return &metricsStore{
objCounters: map[string]uint64{
"phy": 0,
@@ -404,7 +404,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
}),
}
- mm := NewMetricStore()
+ mm := newMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go
index 7dacbfa6c..7dd7189bb 100644
--- a/pkg/local_object_storage/shard/reload_test.go
+++ b/pkg/local_object_storage/shard/reload_test.go
@@ -51,7 +51,7 @@ func TestShardReload(t *testing.T) {
WithMetaBaseOptions(metaOpts...),
WithPiloramaOptions(
pilorama.WithPath(filepath.Join(p, "pilorama"))),
- WithMetricsWriter(NewMetricStore()),
+ WithMetricsWriter(newMetricStore()),
}
sh := New(opts...)
From f6582081a4ee67e97773f655b8f18148946c5a0c Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 17:31:15 +0300
Subject: [PATCH 206/705] [#1431] obj_storage/metabase: Delete unused variable
Signed-off-by: Alexander Chuprov
---
pkg/local_object_storage/metabase/put.go | 5 ++---
pkg/services/object/remote_reader.go | 3 ---
2 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 0c14196b7..b329e8032 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -59,9 +59,8 @@ func (p *PutPrm) SetIndexAttributes(v bool) {
}
var (
- ErrUnknownObjectType = errors.New("unknown object type")
- ErrIncorrectSplitInfoUpdate = errors.New("updating split info on object without it")
- ErrIncorrectRootObject = errors.New("invalid root object")
+ ErrUnknownObjectType = errors.New("unknown object type")
+ ErrIncorrectRootObject = errors.New("invalid root object")
)
// Put saves object header in metabase. Object payload expected to be cut.
diff --git a/pkg/services/object/remote_reader.go b/pkg/services/object/remote_reader.go
index 18b6107cf..bc6ffd160 100644
--- a/pkg/services/object/remote_reader.go
+++ b/pkg/services/object/remote_reader.go
@@ -2,7 +2,6 @@ package object
import (
"context"
- "errors"
"fmt"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -35,8 +34,6 @@ type RemoteRequestPrm struct {
const remoteOpTTL = 1
-var ErrNotFound = errors.New("object header not found")
-
// NewRemoteReader creates, initializes and returns new RemoteHeader instance.
func NewRemoteReader(keyStorage *util.KeyStorage, cache ClientConstructor) *RemoteReader {
return &RemoteReader{
From d83879d4b859f016a9bfef808b19324ce593814e Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 18:05:55 +0300
Subject: [PATCH 207/705] [#1431] node: Fix comment format
Signed-off-by: Alexander Chuprov
---
cmd/frostfs-node/config/profiler/config.go | 2 +-
cmd/frostfs-node/object.go | 2 +-
pkg/core/netmap/keys.go | 2 +-
.../netmap/nodevalidation/locode/deps.go | 16 ++++++++--------
pkg/innerring/processors/netmap/processor.go | 2 +-
pkg/local_object_storage/shard/metrics.go | 2 +-
pkg/morph/client/actor.go | 2 +-
pkg/morph/event/container/put_notary.go | 2 +-
pkg/network/group.go | 4 ++--
pkg/services/control/ir/server/deps.go | 2 +-
pkg/services/control/rpc.go | 2 +-
pkg/services/control/server/server.go | 4 ++--
pkg/services/netmap/executor.go | 4 ++--
pkg/services/object/common/writer/distributed.go | 2 +-
pkg/services/object/delete/service.go | 4 ++--
pkg/services/object/patch/service.go | 2 +-
pkg/services/policer/option.go | 2 +-
pkg/util/rand/rand.go | 2 +-
pkg/util/sdnotify/sdnotify.go | 2 +-
19 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/cmd/frostfs-node/config/profiler/config.go b/cmd/frostfs-node/config/profiler/config.go
index 191694970..6c3e8adab 100644
--- a/cmd/frostfs-node/config/profiler/config.go
+++ b/cmd/frostfs-node/config/profiler/config.go
@@ -52,7 +52,7 @@ func Address(c *config.Config) string {
return AddressDefault
}
-// BlockRates returns the value of "block_rate" config parameter
+// BlockRate returns the value of "block_rate" config parameter
// from "pprof" section.
func BlockRate(c *config.Config) int {
s := c.Sub(subsection)
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 68acb05d3..c484c5d8c 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -117,7 +117,7 @@ func (i *delNetInfo) TombstoneLifetime() (uint64, error) {
return i.cfg.cfgObject.tombstoneLifetime.Load(), nil
}
-// returns node owner ID calculated from configured private key.
+// LocalNodeID returns node owner ID calculated from configured private key.
//
// Implements method needed for Object.Delete service.
func (i *delNetInfo) LocalNodeID() user.ID {
diff --git a/pkg/core/netmap/keys.go b/pkg/core/netmap/keys.go
index 29cb2dc94..0c64bb798 100644
--- a/pkg/core/netmap/keys.go
+++ b/pkg/core/netmap/keys.go
@@ -2,6 +2,6 @@ package netmap
// AnnouncedKeys is an interface of utility for working with the announced public keys of the storage nodes.
type AnnouncedKeys interface {
- // Checks if the key was announced by a local node.
+ // IsLocalKey checks if the key was announced by a local node.
IsLocalKey(key []byte) bool
}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
index 8f6667933..ba5db9205 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
@@ -8,38 +8,38 @@ import (
// Record is an interface of read-only
// FrostFS LOCODE database single entry.
type Record interface {
- // Must return ISO 3166-1 alpha-2
+ // CountryCode must return ISO 3166-1 alpha-2
// country code.
//
// Must not return nil.
CountryCode() *locodedb.CountryCode
- // Must return English short country name
+ // CountryName must return English short country name
// officially used by the ISO 3166
// Maintenance Agency (ISO 3166/MA).
CountryName() string
- // Must return UN/LOCODE 3-character code
+ // LocationCode must return UN/LOCODE 3-character code
// for the location (numerals 2-9 may also
// be used).
//
// Must not return nil.
LocationCode() *locodedb.LocationCode
- // Must return name of the location which
+ // LocationName must return name of the location which
// have been allocated a UN/LOCODE without
// diacritic sign.
LocationName() string
- // Must return ISO 1-3 character alphabetic
+ // SubDivCode must return ISO 1-3 character alphabetic
// and/or numeric code for the administrative
// division of the country concerned.
SubDivCode() string
- // Must return subdivision name.
+ // SubDivName must return subdivision name.
SubDivName() string
- // Must return existing continent where is
+ // Continent must return existing continent where is
// the location.
//
// Must not return nil.
@@ -49,7 +49,7 @@ type Record interface {
// DB is an interface of read-only
// FrostFS LOCODE database.
type DB interface {
- // Must find the record that corresponds to
+ // Get must find the record that corresponds to
// LOCODE and provides the Record interface.
//
// Must return an error if Record is nil.
diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go
index e8fb8721b..4cecda59c 100644
--- a/pkg/innerring/processors/netmap/processor.go
+++ b/pkg/innerring/processors/netmap/processor.go
@@ -43,7 +43,7 @@ type (
// of information about the node and its finalization for adding
// to the network map.
NodeValidator interface {
- // Must verify and optionally update NodeInfo structure.
+ // VerifyAndUpdate must verify and optionally update NodeInfo structure.
//
// Must return an error if NodeInfo input is invalid.
// Must return an error if it is not possible to correctly
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
index 91bf8d0ae..087ba42ef 100644
--- a/pkg/local_object_storage/shard/metrics.go
+++ b/pkg/local_object_storage/shard/metrics.go
@@ -22,7 +22,7 @@ type MetricsWriter interface {
// SetShardID must set (update) the shard identifier that will be used in
// metrics.
SetShardID(id string)
- // SetReadonly must set shard mode.
+ // SetMode sets the mode of shard.
SetMode(mode mode.Mode)
// SetContainerObjectsCount sets container object count.
SetContainerObjectsCount(cnrID string, objectType string, value uint64)
diff --git a/pkg/morph/client/actor.go b/pkg/morph/client/actor.go
index b6718dea5..2849f3052 100644
--- a/pkg/morph/client/actor.go
+++ b/pkg/morph/client/actor.go
@@ -16,7 +16,7 @@ type actorProvider interface {
GetRPCActor() actor.RPCActor
}
-// Client switches an established connection with neo-go if it is broken.
+// SwitchRPCGuardedActor switches an established connection with neo-go if it is broken.
// This leads to an invalidation of an rpc actor within Client. That means the
// components that are initilized with the rpc actor may unintentionally use
// it when it is already invalidated. SwitchRPCGuardedActor is used to prevent
diff --git a/pkg/morph/event/container/put_notary.go b/pkg/morph/event/container/put_notary.go
index f5779ced6..6b2ee7b0a 100644
--- a/pkg/morph/event/container/put_notary.go
+++ b/pkg/morph/event/container/put_notary.go
@@ -46,7 +46,7 @@ const (
// put container requests.
PutNotaryEvent = "put"
- // PutNotaryEvent is an ID of notary "put named container" notification.
+ // PutNamedNotaryEvent is an ID of notary "put named container" notification.
PutNamedNotaryEvent = "putNamed"
)
diff --git a/pkg/network/group.go b/pkg/network/group.go
index a6de0653e..9843b14d4 100644
--- a/pkg/network/group.go
+++ b/pkg/network/group.go
@@ -67,11 +67,11 @@ func (x AddressGroup) Swap(i, j int) {
// MultiAddressIterator is an interface of network address group.
type MultiAddressIterator interface {
- // Must iterate over network addresses and pass each one
+ // IterateAddresses must iterate over network addresses and pass each one
// to the handler until it returns true.
IterateAddresses(func(string) bool)
- // Must return number of addresses in group.
+ // NumberOfAddresses must return number of addresses in group.
NumberOfAddresses() int
}
diff --git a/pkg/services/control/ir/server/deps.go b/pkg/services/control/ir/server/deps.go
index 0c2de5300..9d5cfefc8 100644
--- a/pkg/services/control/ir/server/deps.go
+++ b/pkg/services/control/ir/server/deps.go
@@ -5,7 +5,7 @@ import control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // Must calculate and return current health status of the IR application.
+ // HealthStatus must calculate and return current health status of the IR application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index 80aece008..04524a68c 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -292,7 +292,7 @@ func ListTargetsLocalOverrides(cli *client.Client, req *ListTargetsLocalOverride
return wResp.message, nil
}
-// RemoveChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC.
+// GetChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC.
func GetChainLocalOverride(cli *client.Client, req *GetChainLocalOverrideRequest, opts ...client.CallOption) (*GetChainLocalOverrideResponse, error) {
wResp := newResponseWrapper[GetChainLocalOverrideResponse]()
wReq := &requestWrapper{m: req}
diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go
index f3fe56a46..b6fdcb246 100644
--- a/pkg/services/control/server/server.go
+++ b/pkg/services/control/server/server.go
@@ -26,13 +26,13 @@ type Server struct {
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // Must calculate and return current status of the node in FrostFS network map.
+ // NetmapStatus must calculate and return current status of the node in FrostFS network map.
//
// If status can not be calculated for any reason,
// control.netmapStatus_STATUS_UNDEFINED should be returned.
NetmapStatus() control.NetmapStatus
- // Must calculate and return current health status of the node application.
+ // HealthStatus must calculate and return current health status of the node application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go
index 496b07a98..ae2044246 100644
--- a/pkg/services/netmap/executor.go
+++ b/pkg/services/netmap/executor.go
@@ -26,7 +26,7 @@ type executorSvc struct {
// NodeState encapsulates information
// about current node state.
type NodeState interface {
- // Must return current node state
+ // LocalNodeInfo must return current node state
// in FrostFS API v2 NodeInfo structure.
LocalNodeInfo() (*netmap.NodeInfo, error)
@@ -39,7 +39,7 @@ type NodeState interface {
// NetworkInfo encapsulates source of the
// recent information about the FrostFS network.
type NetworkInfo interface {
- // Must return recent network information in FrostFS API v2 NetworkInfo structure.
+ // Dump must return recent network information in FrostFS API v2 NetworkInfo structure.
//
// If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset.
Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error)
diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go
index f62934bed..f7486eae7 100644
--- a/pkg/services/object/common/writer/distributed.go
+++ b/pkg/services/object/common/writer/distributed.go
@@ -28,7 +28,7 @@ type distributedWriter struct {
resetSuccessAfterOnBroadcast bool
}
-// parameters and state of container Traversal.
+// Traversal parameters and state of container.
type Traversal struct {
Opts []placement.Option
diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go
index 0ba21eee3..e4f7a8c50 100644
--- a/pkg/services/object/delete/service.go
+++ b/pkg/services/object/delete/service.go
@@ -27,11 +27,11 @@ type Option func(*cfg)
type NetworkInfo interface {
netmap.State
- // Must return the lifespan of the tombstones
+ // TombstoneLifetime must return the lifespan of the tombstones
// in the FrostFS epochs.
TombstoneLifetime() (uint64, error)
- // Returns user ID of the local storage node. Result must not be nil.
+ // LocalNodeID returns user ID of the local storage node. Result must not be nil.
// New tombstone objects will have the result as an owner ID if removal is executed w/o a session.
LocalNodeID() user.ID
}
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
index f1082dfff..953f82b48 100644
--- a/pkg/services/object/patch/service.go
+++ b/pkg/services/object/patch/service.go
@@ -26,7 +26,7 @@ func NewService(cfg *objectwriter.Config,
}
}
-// Put calls internal service and returns v2 object streamer.
+// Patch calls internal service and returns v2 object streamer.
func (s *Service) Patch() (object.PatchObjectStream, error) {
nodeKey, err := s.Config.KeyStorage.GetKey(nil)
if err != nil {
diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go
index 9dbfd8b9f..336f7a0ab 100644
--- a/pkg/services/policer/option.go
+++ b/pkg/services/policer/option.go
@@ -143,7 +143,7 @@ func WithPlacementBuilder(v placement.Builder) Option {
}
}
-// WithRemoteObjectHeader returns option to set remote object header receiver of Policer.
+// WithRemoteObjectHeaderFunc returns option to set remote object header receiver of Policer.
func WithRemoteObjectHeaderFunc(v RemoteObjectHeaderFunc) Option {
return func(c *cfg) {
c.remoteHeader = v
diff --git a/pkg/util/rand/rand.go b/pkg/util/rand/rand.go
index 97508f82a..a06296a07 100644
--- a/pkg/util/rand/rand.go
+++ b/pkg/util/rand/rand.go
@@ -13,7 +13,7 @@ func Uint64() uint64 {
return source.Uint64()
}
-// Uint64 returns a random uint32 value.
+// Uint32 returns a random uint32 value.
func Uint32() uint32 {
return source.Uint32()
}
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
index e94ff77ad..22549bc96 100644
--- a/pkg/util/sdnotify/sdnotify.go
+++ b/pkg/util/sdnotify/sdnotify.go
@@ -24,7 +24,7 @@ var (
errSocketIsNotInitialized = errors.New("socket is not initialized")
)
-// Initializes socket with provided name of
+// InitSocket initializes socket with provided name of
// environment variable.
func InitSocket() error {
notifySocket := os.Getenv("NOTIFY_SOCKET")
From 41038b2ec0fab0d9488f15330e8777f053a28c03 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 18:16:59 +0300
Subject: [PATCH 208/705] [#1431] node: Fix 'empty slice declaration using a
literal'
Signed-off-by: Alexander Chuprov
---
pkg/innerring/processors/alphabet/handlers_test.go | 6 +++---
pkg/local_object_storage/shard/control.go | 2 +-
pkg/morph/event/notary_preparator_test.go | 2 +-
scripts/populate-metabase/internal/generate.go | 8 ++++----
4 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go
index dfda37472..c7a004b54 100644
--- a/pkg/innerring/processors/alphabet/handlers_test.go
+++ b/pkg/innerring/processors/alphabet/handlers_test.go
@@ -95,7 +95,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
t.Parallel()
var emission uint64 = 100_000
var index int = 5
- var parsedWallets []util.Uint160 = []util.Uint160{}
+ var parsedWallets []util.Uint160
alphabetContracts := innerring.NewAlphabetContracts()
for i := range index + 1 {
@@ -167,7 +167,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
t.Parallel()
var emission uint64 = 100_000
var index int = 5
- var parsedWallets []util.Uint160 = []util.Uint160{}
+ var parsedWallets []util.Uint160
alphabetContracts := innerring.NewAlphabetContracts()
for i := range index + 1 {
@@ -176,7 +176,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
morphClient := &testMorphClient{}
- nodes := []netmap.NodeInfo{}
+ var nodes []netmap.NodeInfo
network := &netmap.NetMap{}
network.SetNodes(nodes)
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 4f9f25608..62800dbd0 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -368,7 +368,7 @@ func (s *Shard) Close() error {
if s.rb != nil {
s.rb.Stop(s.log)
}
- components := []interface{ Close() error }{}
+ var components []interface{ Close() error }
if s.pilorama != nil {
components = append(components, s.pilorama)
diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go
index 4c269bcbd..60ddb4601 100644
--- a/pkg/morph/event/notary_preparator_test.go
+++ b/pkg/morph/event/notary_preparator_test.go
@@ -25,7 +25,7 @@ var (
alphaKeys keys.PublicKeys
wrongAlphaKeys keys.PublicKeys
- dummyAlphabetInvocationScript = []byte{} // expected to be empty if generated by Notary Actor, as requester can't fill it in
+ dummyAlphabetInvocationScript []byte
dummyAlphabetInvocationScriptOld = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) // expected to be dummy if generated manually
wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...)
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
index d2004b673..8a96dcaaa 100644
--- a/scripts/populate-metabase/internal/generate.go
+++ b/scripts/populate-metabase/internal/generate.go
@@ -18,7 +18,7 @@ import (
)
func GeneratePayloadPool(count uint, size uint) [][]byte {
- pool := [][]byte{}
+ var pool [][]byte
for i := uint(0); i < count; i++ {
payload := make([]byte, size)
_, _ = rand.Read(payload)
@@ -29,7 +29,7 @@ func GeneratePayloadPool(count uint, size uint) [][]byte {
}
func GenerateAttributePool(count uint) []objectSDK.Attribute {
- pool := []objectSDK.Attribute{}
+ var pool []objectSDK.Attribute
for i := uint(0); i < count; i++ {
for j := uint(0); j < count; j++ {
attr := *objectSDK.NewAttribute()
@@ -42,7 +42,7 @@ func GenerateAttributePool(count uint) []objectSDK.Attribute {
}
func GenerateOwnerPool(count uint) []user.ID {
- pool := []user.ID{}
+ var pool []user.ID
for i := uint(0); i < count; i++ {
pool = append(pool, usertest.ID())
}
@@ -117,7 +117,7 @@ func WithPayloadFromPool(pool [][]byte) ObjectOption {
func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption {
return func(obj *objectSDK.Object) {
- attrs := []objectSDK.Attribute{}
+ var attrs []objectSDK.Attribute
for i := uint(0); i < count; i++ {
attrs = append(attrs, pool[rand.Intn(len(pool))])
}
From 07ce40e1196a44d305390dcea8e1e0040f6a16d2 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Tue, 15 Oct 2024 12:28:58 +0300
Subject: [PATCH 209/705] [#1430] adm/morph: Add NNS address display in
'deploy'
Signed-off-by: Alexander Chuprov
---
cmd/frostfs-adm/internal/modules/morph/helper/contract.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
index 2011301d1..eea3b040e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
@@ -166,5 +166,6 @@ func DeployNNS(c *InitializeContext, method string) error {
return fmt.Errorf("can't send deploy transaction: %w", err)
}
+ c.Command.Println("NNS hash:", invokeHash.StringLE())
return c.AwaitTx()
}
From 90f36693995e1b411094686e4419bb7d11831f35 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 16:07:38 +0300
Subject: [PATCH 210/705] [#1342] network/cache: Add node address to error
multiClient
Signed-off-by: Alexander Chuprov
---
pkg/network/cache/multi.go | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index 9305c143b..b83cbb217 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -169,15 +169,16 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
var siErr *objectSDK.SplitInfoError
var eiErr *objectSDK.ECInfoError
+ if err != nil {
+ err = fmt.Errorf("client connection error at %v: %w", addr, err)
+ x.ReportError(err)
+ }
+
success := err == nil || errors.Is(err, context.Canceled) || errors.As(err, &siErr) || errors.As(err, &eiErr)
if success || firstErr == nil || errors.Is(firstErr, errRecentlyFailed) {
firstErr = err
}
- if err != nil {
- x.ReportError(err)
- }
-
return success
})
From b0c5def2d934ed5b79f54fb37160560f576785f4 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 17 Oct 2024 14:16:03 +0300
Subject: [PATCH 211/705] [#1433] shard/test: Use WithDisabledGC() option where
possible
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/shard/gc_internal_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go
index 3993593ad..11db5e54e 100644
--- a/pkg/local_object_storage/shard/gc_internal_test.go
+++ b/pkg/local_object_storage/shard/gc_internal_test.go
@@ -73,10 +73,10 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
return pool
}),
WithGCRemoverSleepInterval(1 * time.Second),
+ WithDisabledGC(),
}
sh = New(opts...)
- sh.gcCfg.testHookRemover = func(context.Context) gcRunResult { return gcRunResult{} }
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
defer func() { require.NoError(t, sh.Close()) }()
From b42bcdc6fa6cca2cf8e5a5fbaf2c8cf82f957b37 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 17 Oct 2024 14:37:26 +0300
Subject: [PATCH 212/705] [#1433] services/object: Put object before auxiliary
info
Consider the following operations ordering:
1. Inhume(with tombstone A) --> add tombstone mark for an object
2. --> new epoch arrives
3. --> GCMark is added for a tombstone A, because it is unavailable
4. Put(A) --> return error, because the object already has a GCMark
It is possible, and I have successfully reproduced it with a test on the
shard level. However, the error is related to the specific
_ordering_ of operations with engine. And triggering race-conditions like
this is only possible on a shard level currently, so no tests are
written.
Signed-off-by: Evgenii Stratonikov
---
pkg/services/object/common/writer/local.go | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/pkg/services/object/common/writer/local.go b/pkg/services/object/common/writer/local.go
index e219b44dd..cf3d03275 100644
--- a/pkg/services/object/common/writer/local.go
+++ b/pkg/services/object/common/writer/local.go
@@ -32,6 +32,10 @@ type LocalTarget struct {
}
func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
+ if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
+ return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
+ }
+
switch meta.Type() {
case objectSDK.TypeTombstone:
err := t.Storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
@@ -47,8 +51,5 @@ func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met
// objects that do not change meta storage
}
- if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
- return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
- }
return nil
}
From 3304afa9d1f9893ad72bcd9445751798b6558c16 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 8 Oct 2024 15:24:01 +0300
Subject: [PATCH 213/705] [#1422] config: Add multinet config
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config/multinet/config.go | 62 +++++++++++++++++++
.../config/multinet/config_test.go | 52 ++++++++++++++++
config/example/ir.env | 9 +++
config/example/ir.yaml | 15 +++++
config/example/node.env | 10 +++
config/example/node.json | 22 +++++++
config/example/node.yaml | 15 +++++
docs/storage-node-configuration.md | 39 ++++++++++--
8 files changed, 219 insertions(+), 5 deletions(-)
create mode 100644 cmd/frostfs-node/config/multinet/config.go
create mode 100644 cmd/frostfs-node/config/multinet/config_test.go
diff --git a/cmd/frostfs-node/config/multinet/config.go b/cmd/frostfs-node/config/multinet/config.go
new file mode 100644
index 000000000..f598efc51
--- /dev/null
+++ b/cmd/frostfs-node/config/multinet/config.go
@@ -0,0 +1,62 @@
+package multinet
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+)
+
+const (
+ subsection = "multinet"
+
+ FallbackDelayDefault = 300 * time.Millisecond
+)
+
+// Enabled returns the value of "enabled" config parameter from "multinet" section.
+func Enabled(c *config.Config) bool {
+ return config.BoolSafe(c.Sub(subsection), "enabled")
+}
+
+type Subnet struct {
+ Mask string
+ SourceIPs []string
+}
+
+// Subnets returns the value of "subnets" config parameter from "multinet" section.
+func Subnets(c *config.Config) []Subnet {
+ var result []Subnet
+ sub := c.Sub(subsection).Sub("subnets")
+ for i := 0; ; i++ {
+ s := sub.Sub(strconv.FormatInt(int64(i), 10))
+ mask := config.StringSafe(s, "mask")
+ if mask == "" {
+ break
+ }
+ sourceIPs := config.StringSliceSafe(s, "source_ips")
+ result = append(result, Subnet{
+ Mask: mask,
+ SourceIPs: sourceIPs,
+ })
+ }
+ return result
+}
+
+// Balancer returns the value of "balancer" config parameter from "multinet" section.
+func Balancer(c *config.Config) string {
+ return config.StringSafe(c.Sub(subsection), "balancer")
+}
+
+// Restrict returns the value of "restrict" config parameter from "multinet" section.
+func Restrict(c *config.Config) bool {
+ return config.BoolSafe(c.Sub(subsection), "restrict")
+}
+
+// FallbackDelay returns the value of "fallback_delay" config parameter from "multinet" section.
+func FallbackDelay(c *config.Config) time.Duration {
+ fd := config.DurationSafe(c.Sub(subsection), "fallback_delay")
+ if fd != 0 { // negative value means no fallback
+ return fd
+ }
+ return FallbackDelayDefault
+}
diff --git a/cmd/frostfs-node/config/multinet/config_test.go b/cmd/frostfs-node/config/multinet/config_test.go
new file mode 100644
index 000000000..5f7dc6d53
--- /dev/null
+++ b/cmd/frostfs-node/config/multinet/config_test.go
@@ -0,0 +1,52 @@
+package multinet
+
+import (
+ "testing"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMultinetSection(t *testing.T) {
+ t.Run("defaults", func(t *testing.T) {
+ empty := configtest.EmptyConfig()
+ require.Equal(t, false, Enabled(empty))
+ require.Equal(t, ([]Subnet)(nil), Subnets(empty))
+ require.Equal(t, "", Balancer(empty))
+ require.Equal(t, false, Restrict(empty))
+ require.Equal(t, FallbackDelayDefault, FallbackDelay(empty))
+ })
+
+ const path = "../../../../config/example/node"
+
+ fileConfigTest := func(c *config.Config) {
+ require.Equal(t, true, Enabled(c))
+ require.Equal(t, []Subnet{
+ {
+ Mask: "192.168.219.174/24",
+ SourceIPs: []string{
+ "192.168.218.185",
+ "192.168.219.185",
+ },
+ },
+ {
+ Mask: "10.78.70.74/24",
+ SourceIPs: []string{
+ "10.78.70.185",
+ "10.78.71.185",
+ },
+ },
+ }, Subnets(c))
+ require.Equal(t, "roundrobin", Balancer(c))
+ require.Equal(t, false, Restrict(c))
+ require.Equal(t, 350*time.Millisecond, FallbackDelay(c))
+ }
+
+ configtest.ForEachFileType(path, fileConfigTest)
+
+ t.Run("ENV", func(t *testing.T) {
+ configtest.ForEnvFileType(t, path, fileConfigTest)
+ })
+}
diff --git a/config/example/ir.env b/config/example/ir.env
index 7234a4b32..ebd91c243 100644
--- a/config/example/ir.env
+++ b/config/example/ir.env
@@ -80,3 +80,12 @@ FROSTFS_IR_PPROF_MUTEX_RATE=10000
FROSTFS_IR_PROMETHEUS_ENABLED=true
FROSTFS_IR_PROMETHEUS_ADDRESS=localhost:9090
FROSTFS_IR_PROMETHEUS_SHUTDOWN_TIMEOUT=30s
+
+FROSTFS_MULTINET_ENABLED=true
+FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
+FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
+FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
+FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
+FROSTFS_MULTINET_BALANCER=roundrobin
+FROSTFS_MULTINET_RESTRICT=false
+FROSTFS_MULTINET_FALLBACK_DELAY=350ms
diff --git a/config/example/ir.yaml b/config/example/ir.yaml
index 4c64f088b..49f9fd324 100644
--- a/config/example/ir.yaml
+++ b/config/example/ir.yaml
@@ -123,3 +123,18 @@ prometheus:
systemdnotify:
enabled: true
+
+multinet:
+ enabled: true
+ subnets:
+ - mask: 192.168.219.174/24
+ source_ips:
+ - 192.168.218.185
+ - 192.168.219.185
+ - mask: 10.78.70.74/24
+ source_ips:
+ - 10.78.70.185
+ - 10.78.71.185
+ balancer: roundrobin
+ restrict: false
+ fallback_delay: 350ms
diff --git a/config/example/node.env b/config/example/node.env
index 6618a981a..580d343fb 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -206,3 +206,13 @@ FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
# AUDIT section
FROSTFS_AUDIT_ENABLED=true
+
+# MULTINET section
+FROSTFS_MULTINET_ENABLED=true
+FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
+FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
+FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
+FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
+FROSTFS_MULTINET_BALANCER=roundrobin
+FROSTFS_MULTINET_RESTRICT=false
+FROSTFS_MULTINET_FALLBACK_DELAY=350ms
diff --git a/config/example/node.json b/config/example/node.json
index 0d100ed80..3470d2d12 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -264,5 +264,27 @@
},
"audit": {
"enabled": true
+ },
+ "multinet": {
+ "enabled": true,
+ "subnets": [
+ {
+ "mask": "192.168.219.174/24",
+ "source_ips": [
+ "192.168.218.185",
+ "192.168.219.185"
+ ]
+ },
+ {
+ "mask": "10.78.70.74/24",
+ "source_ips":[
+ "10.78.70.185",
+ "10.78.71.185"
+ ]
+ }
+ ],
+ "balancer": "roundrobin",
+ "restrict": false,
+ "fallback_delay": "350ms"
}
}
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 2a80fba18..2a963fc0f 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -240,3 +240,18 @@ runtime:
audit:
enabled: true
+
+multinet:
+ enabled: true
+ subnets:
+ - mask: 192.168.219.174/24
+ source_ips:
+ - 192.168.218.185
+ - 192.168.219.185
+ - mask: 10.78.70.74/24
+ source_ips:
+ - 10.78.70.185
+ - 10.78.71.185
+ balancer: roundrobin
+ restrict: false
+ fallback_delay: 350ms
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index c74695e2b..2b94400df 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -25,8 +25,8 @@ There are some custom types used for brevity:
| `replicator` | [Replicator service configuration](#replicator-section) |
| `storage` | [Storage engine configuration](#storage-section) |
| `runtime` | [Runtime configuration](#runtime-section) |
-| `audit` | [Audit configuration](#audit-section) |
-
+| `audit` | [Audit configuration](#audit-section) |
+| `multinet` | [Multinet configuration](#multinet-section) |
# `control` section
```yaml
@@ -435,6 +435,35 @@ audit:
enabled: true
```
-| Parameter | Type | Default value | Description |
-|---------------------|--------|---------------|---------------------------------------------------|
-| `soft_memory_limit` | `bool` | false | If `true` then audit event logs will be recorded. |
+| Parameter | Type | Default value | Description |
+|-----------|--------|---------------|---------------------------------------------------|
+| `enabled` | `bool` | false | If `true` then audit event logs will be recorded. |
+
+
+# `multinet` section
+Contains multinet parameters.
+
+```yaml
+multinet:
+ enabled: true
+ subnets:
+ - mask: 192.168.219.174/24
+ source_ips:
+ - 192.168.218.185
+ - 192.168.219.185
+ - mask: 10.78.70.74/24
+ source_ips:
+ - 10.78.70.185
+ - 10.78.71.185
+ balancer: roundrobin
+ restrict: false
+ fallback_delay: 350ms
+```
+
+| Parameter | Type | Default value | Description |
+| ---------------- | ---------- | ------------- | -------------------------------------------------------------------------------------------------------------------------- |
+| `enabled` | `bool` | false | If `true` then source-based routing is enabled. |
+| `subnets` | `subnet` | empty | Resulting subnets. |
+| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". |
+| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. |
+| `fallback_delay` | `duration` | 350ms         | Delay before fallback to secondary IP addresses in case of hostname resolution.                                              |
From 74db7352653b67e67e9345e0659fd37047fec710 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 8 Oct 2024 17:25:37 +0300
Subject: [PATCH 214/705] [#1422] node: Add dialer source to config
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 30 +++++++++++++++
go.mod | 1 +
go.sum | 2 +
internal/logs/logs.go | 1 +
internal/net/config.go | 66 +++++++++++++++++++++++++++++++++
internal/net/dial_target.go | 54 +++++++++++++++++++++++++++
internal/net/dialer.go | 35 ++++++++++++++++++
internal/net/dialer_source.go | 69 +++++++++++++++++++++++++++++++++++
8 files changed, 258 insertions(+)
create mode 100644 internal/net/config.go
create mode 100644 internal/net/dial_target.go
create mode 100644 internal/net/dialer.go
create mode 100644 internal/net/dialer_source.go
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 3c7e310b4..dc1bad485 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -26,12 +26,14 @@ import (
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/multinet"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
@@ -436,6 +438,8 @@ type shared struct {
metricsCollector *metrics.NodeMetrics
metricsSvc *objectService.MetricCollector
+
+ dialerSource *internalNet.DialerSource
}
// dynamicConfiguration stores parameters of the
@@ -760,6 +764,9 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path())
fatalOnErr(err)
+ ds, err := internalNet.NewDialerSource(internalNetConfig(appCfg))
+ fatalOnErr(err)
+
cacheOpts := cache.ClientCacheOpts{
DialTimeout: apiclientconfig.DialTimeout(appCfg),
StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
@@ -778,9 +785,27 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
putClientCache: cache.NewSDKClientCache(cacheOpts),
persistate: persistate,
metricsCollector: metrics.NewNodeMetrics(),
+ dialerSource: ds,
}
}
+func internalNetConfig(appCfg *config.Config) internalNet.Config {
+ result := internalNet.Config{
+ Enabled: multinet.Enabled(appCfg),
+ Balancer: multinet.Balancer(appCfg),
+ Restrict: multinet.Restrict(appCfg),
+ FallbackDelay: multinet.FallbackDelay(appCfg),
+ }
+ sn := multinet.Subnets(appCfg)
+ for _, s := range sn {
+ result.Subnets = append(result.Subnets, internalNet.Subnet{
+ Prefix: s.Mask,
+ SourceIPs: s.SourceIPs,
+ })
+ }
+ return result
+}
+
func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap {
netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
fatalOnErr(err)
@@ -1336,6 +1361,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
}
}
+ if err := c.dialerSource.Update(internalNetConfig(c.appCfg)); err != nil {
+ c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
+ return
+ }
+
c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
diff --git a/go.mod b/go.mod
index 1468c12b2..a84d3122a 100644
--- a/go.mod
+++ b/go.mod
@@ -11,6 +11,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509
git.frostfs.info/TrueCloudLab/hrw v1.2.1
+ git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
diff --git a/go.sum b/go.sum
index 5ce81807a..43d53aa40 100644
--- a/go.sum
+++ b/go.sum
@@ -14,6 +14,8 @@ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
+git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
+git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ=
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index b4bc31b0c..0e9d58f32 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -523,4 +523,5 @@ const (
WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
WritecacheCantGetObject = "can't get an object from fstree"
+ FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
)
diff --git a/internal/net/config.go b/internal/net/config.go
new file mode 100644
index 000000000..10450db23
--- /dev/null
+++ b/internal/net/config.go
@@ -0,0 +1,66 @@
+package net
+
+import (
+ "errors"
+ "fmt"
+ "net/netip"
+ "slices"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/multinet"
+)
+
+var errEmptySourceIPList = errors.New("empty source IP list")
+
+type Subnet struct {
+ Prefix string
+ SourceIPs []string
+}
+
+type Config struct {
+ Enabled bool
+ Subnets []Subnet
+ Balancer string
+ Restrict bool
+ FallbackDelay time.Duration
+}
+
+func (c Config) toMultinetConfig() (multinet.Config, error) {
+ var subnets []multinet.Subnet
+ for _, s := range c.Subnets {
+ var ms multinet.Subnet
+ p, err := netip.ParsePrefix(s.Prefix)
+ if err != nil {
+ return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err)
+ }
+ ms.Prefix = p
+ for _, ip := range s.SourceIPs {
+ addr, err := netip.ParseAddr(ip)
+ if err != nil {
+ return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err)
+ }
+ ms.SourceIPs = append(ms.SourceIPs, addr)
+ }
+ if len(ms.SourceIPs) == 0 {
+ return multinet.Config{}, errEmptySourceIPList
+ }
+ subnets = append(subnets, ms)
+ }
+ return multinet.Config{
+ Subnets: subnets,
+ Balancer: multinet.BalancerType(c.Balancer),
+ Restrict: c.Restrict,
+ FallbackDelay: c.FallbackDelay,
+ Dialer: newDefaulDialer(),
+ }, nil
+}
+
+func (c Config) equals(other Config) bool {
+ return c.Enabled == other.Enabled &&
+ slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool {
+ return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs)
+ }) &&
+ c.Balancer == other.Balancer &&
+ c.Restrict == other.Restrict &&
+ c.FallbackDelay == other.FallbackDelay
+}
diff --git a/internal/net/dial_target.go b/internal/net/dial_target.go
new file mode 100644
index 000000000..6265f1860
--- /dev/null
+++ b/internal/net/dial_target.go
@@ -0,0 +1,54 @@
+// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go
+
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package net
+
+import (
+ "net/url"
+ "strings"
+)
+
+// parseDialTarget returns the network and address to pass to dialer.
+func parseDialTarget(target string) (string, string) {
+ net := "tcp"
+ m1 := strings.Index(target, ":")
+ m2 := strings.Index(target, ":/")
+ // handle unix:addr which will fail with url.Parse
+ if m1 >= 0 && m2 < 0 {
+ if n := target[0:m1]; n == "unix" {
+ return n, target[m1+1:]
+ }
+ }
+ if m2 >= 0 {
+ t, err := url.Parse(target)
+ if err != nil {
+ return net, target
+ }
+ scheme := t.Scheme
+ addr := t.Path
+ if scheme == "unix" {
+ if addr == "" {
+ addr = t.Host
+ }
+ return scheme, addr
+ }
+ }
+ return net, target
+}
diff --git a/internal/net/dialer.go b/internal/net/dialer.go
new file mode 100644
index 000000000..4537490f6
--- /dev/null
+++ b/internal/net/dialer.go
@@ -0,0 +1,35 @@
+package net
+
+import (
+ "context"
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+type Dialer interface {
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+func newDefaulDialer() net.Dialer {
+ // From `grpc.WithContextDialer` comment:
+ //
+ // Note: All supported releases of Go (as of December 2023) override the OS
+ // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+ // with OS defaults for keepalive time and interval, use a net.Dialer that sets
+ // the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
+ // option to true from the Control field. For a concrete example of how to do
+ // this, see internal.NetDialerWithTCPKeepalive().
+ //
+ // https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432
+ return net.Dialer{
+ KeepAlive: time.Duration(-1),
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
diff --git a/internal/net/dialer_source.go b/internal/net/dialer_source.go
new file mode 100644
index 000000000..e6a142a08
--- /dev/null
+++ b/internal/net/dialer_source.go
@@ -0,0 +1,69 @@
+package net
+
+import (
+ "context"
+ "net"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/multinet"
+)
+
+type DialerSource struct {
+ guard sync.RWMutex
+
+ c Config
+
+ md multinet.Dialer
+}
+
+func NewDialerSource(c Config) (*DialerSource, error) {
+ result := &DialerSource{}
+ if err := result.build(c); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+func (s *DialerSource) build(c Config) error {
+ if c.Enabled {
+ mc, err := c.toMultinetConfig()
+ if err != nil {
+ return err
+ }
+ md, err := multinet.NewDialer(mc)
+ if err != nil {
+ return err
+ }
+ s.md = md
+ s.c = c
+ return nil
+ }
+ s.md = nil
+ s.c = c
+ return nil
+}
+
+// GrpcContextDialer returns grpc.WithContextDialer func.
+// Returns nil if multinet disabled.
+func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) {
+ s.guard.RLock()
+ defer s.guard.RUnlock()
+
+ if s.c.Enabled {
+ return func(ctx context.Context, address string) (net.Conn, error) {
+ network, address := parseDialTarget(address)
+ return s.md.DialContext(ctx, network, address)
+ }
+ }
+ return nil
+}
+
+func (s *DialerSource) Update(c Config) error {
+ s.guard.Lock()
+ defer s.guard.Unlock()
+
+ if s.c.equals(c) {
+ return nil
+ }
+ return s.build(c)
+}
From 6c96cc2af6eb9cb64e747c5b758fbec4d90c7287 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 9 Oct 2024 11:11:44 +0300
Subject: [PATCH 215/705] [#1422] node: Use dialer source for SDK cache
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 1 +
internal/net/dialer.go | 6 +++++-
pkg/network/cache/client.go | 2 ++
pkg/network/cache/multi.go | 25 ++++++++++++++-----------
4 files changed, 22 insertions(+), 12 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index dc1bad485..d44597857 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -773,6 +773,7 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
Key: &key.PrivateKey,
AllowExternal: apiclientconfig.AllowExternal(appCfg),
ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
+ DialerSource: ds,
}
return shared{
diff --git a/internal/net/dialer.go b/internal/net/dialer.go
index 4537490f6..daf0f815f 100644
--- a/internal/net/dialer.go
+++ b/internal/net/dialer.go
@@ -13,6 +13,10 @@ type Dialer interface {
DialContext(ctx context.Context, network, address string) (net.Conn, error)
}
+func DialContextTCP(ctx context.Context, address string, d Dialer) (net.Conn, error) {
+ return d.DialContext(ctx, "tcp", address)
+}
+
func newDefaulDialer() net.Dialer {
// From `grpc.WithContextDialer` comment:
//
@@ -28,7 +32,7 @@ func newDefaulDialer() net.Dialer {
KeepAlive: time.Duration(-1),
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
- unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+ _ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
})
},
}
diff --git a/pkg/network/cache/client.go b/pkg/network/cache/client.go
index 371d3c76f..63ae0bfdb 100644
--- a/pkg/network/cache/client.go
+++ b/pkg/network/cache/client.go
@@ -5,6 +5,7 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)
@@ -25,6 +26,7 @@ type (
Key *ecdsa.PrivateKey
ResponseCallback func(client.ResponseMetaInfo) error
AllowExternal bool
+ DialerSource *net.DialerSource
}
)
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index b83cbb217..e936ead65 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -60,18 +60,21 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address
prmInit.Key = *x.opts.Key
}
+ grpcOpts := []grpc.DialOption{
+ grpc.WithChainUnaryInterceptor(
+ metrics.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInteceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ metrics.NewStreamClientInterceptor(),
+ tracing.NewStreamClientInterceptor(),
+ ),
+ grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()),
+ }
+
prmDial := client.PrmDial{
- Endpoint: addr.URIAddr(),
- GRPCDialOptions: []grpc.DialOption{
- grpc.WithChainUnaryInterceptor(
- metrics.NewUnaryClientInterceptor(),
- tracing.NewUnaryClientInteceptor(),
- ),
- grpc.WithChainStreamInterceptor(
- metrics.NewStreamClientInterceptor(),
- tracing.NewStreamClientInterceptor(),
- ),
- },
+ Endpoint: addr.URIAddr(),
+ GRPCDialOptions: grpcOpts,
}
if x.opts.DialTimeout > 0 {
prmDial.DialTimeout = x.opts.DialTimeout
From e314f328c4806bf1b34b6e3c31abdc4afdfaaac4 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 9 Oct 2024 11:18:24 +0300
Subject: [PATCH 216/705] [#1422] tree: Use dialer source for tree service
connections
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/tree.go | 1 +
pkg/services/tree/cache.go | 6 +++++-
pkg/services/tree/options.go | 8 ++++++++
pkg/services/tree/service.go | 2 +-
4 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go
index 192f08471..f188e2fbc 100644
--- a/cmd/frostfs-node/tree.go
+++ b/cmd/frostfs-node/tree.go
@@ -67,6 +67,7 @@ func initTreeService(c *cfg) {
tree.WithAPELocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage()),
tree.WithAPEMorphRuleStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage()),
tree.WithNetmapState(c.cfgNetmap.state),
+ tree.WithDialerSource(c.dialerSource),
)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index 38501b852..e490cb855 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -8,6 +8,7 @@ import (
"sync"
"time"
+ internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
@@ -21,6 +22,7 @@ type clientCache struct {
sync.Mutex
simplelru.LRU[string, cacheItem]
key *ecdsa.PrivateKey
+ ds *internalNet.DialerSource
}
type cacheItem struct {
@@ -36,7 +38,7 @@ const (
var errRecentlyFailed = errors.New("client has recently failed")
-func (c *clientCache) init(pk *ecdsa.PrivateKey) {
+func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) {
if conn := value.cc; conn != nil {
_ = conn.Close()
@@ -44,6 +46,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey) {
})
c.LRU = *l
c.key = pk
+ c.ds = ds
}
func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
@@ -99,6 +102,7 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*
metrics.NewStreamClientInterceptor(),
tracing.NewStreamClientInterceptor(),
),
+ grpc.WithContextDialer(c.ds.GrpcContextDialer()),
}
if !netAddr.IsTLSEnabled() {
diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go
index 1db5607f6..1633ae557 100644
--- a/pkg/services/tree/options.go
+++ b/pkg/services/tree/options.go
@@ -4,6 +4,7 @@ import (
"crypto/ecdsa"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -45,6 +46,7 @@ type cfg struct {
morphChainStorage policyengine.MorphRuleChainStorageReader
metrics MetricsRegister
+ ds *net.DialerSource
}
// Option represents configuration option for a tree service.
@@ -161,3 +163,9 @@ func WithNetmapState(state netmap.State) Option {
c.state = state
}
}
+
+func WithDialerSource(ds *net.DialerSource) Option {
+ return func(c *cfg) {
+ c.ds = ds
+ }
+}
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 60bb1a6ad..2cb2af294 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -65,7 +65,7 @@ func New(opts ...Option) *Service {
s.log = &logger.Logger{Logger: zap.NewNop()}
}
- s.cache.init(s.key)
+ s.cache.init(s.key, s.ds)
s.closeCh = make(chan struct{})
s.replicateCh = make(chan movePair, s.replicatorChannelCapacity)
s.replicateLocalCh = make(chan applyOp)
From 5b653aa65fd65759ee733fd552d17870c6660b13 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 9 Oct 2024 11:34:36 +0300
Subject: [PATCH 217/705] [#1422] morph: Drop single client as not used
Signed-off-by: Dmitrii Stepanov