Compare commits

..

20 commits

Author SHA1 Message Date
3bcc31696b [#1261] shard: Fix delete objects from FSTree
Replace nil storageID with an empty one, like shard.Get does.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-07-23 17:47:32 +03:00
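
A minimal, self-contained sketch of the fix described in the commit above. The types and names here are hypothetical, not the frostfs-node API; the assumption (taken from the commit message) is that an empty storage ID routes an object to FSTree while a non-empty one names a blobovnicza, so treating nil the same as empty keeps deletes from skipping FSTree.

```go
package main

import "fmt"

// resolveSubstorage picks a substorage from a storage ID. Assumption for this
// sketch: an empty ID means the object lives in FSTree, a non-empty ID names a
// blobovnicza. Checking len() == 0 treats nil and empty IDs identically, which
// is the behaviour the commit attributes to shard.Get.
func resolveSubstorage(storageID []byte) string {
	if len(storageID) == 0 {
		return "fstree"
	}
	return "blobovnicza:" + string(storageID)
}

func main() {
	fmt.Println(resolveSubstorage(nil))            // fstree
	fmt.Println(resolveSubstorage([]byte{}))       // fstree
	fmt.Println(resolveSubstorage([]byte("blob"))) // blobovnicza:blob
}
```
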
845d8be672 [#9999] blobovnicza: Drop leaf limit
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-07-23 12:27:19 +03:00
498f9955ea [#1089] control: Add USER and GROUP targets for local override storage
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-12 17:35:50 +03:00
f8973f9b05 [#1089] control: Format proto files with clang-format
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-12 17:35:50 +03:00
50ec4febcc [#1089] ape: Provide request actor as an additional target
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-12 17:35:50 +03:00
59d7a6940d [#1090] tree: Make workaround for APE checks
* Make the `verifyClient` method perform an APE check if a container
  was created with a zero-filled basic ACL.
* Object verbs are used in APE until tree verbs are introduced.

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-04-12 12:02:28 +03:00
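
A rough sketch of the workaround described above, using hypothetical types (this is not the actual `verifyClient` of the tree service): when a container carries a zero-filled basic ACL, verification falls through to an APE evaluation, with object verbs standing in for tree verbs.

```go
package main

import (
	"errors"
	"fmt"
)

// apeCheck stands in for a policy-engine evaluation; the real service would
// consult the APE chains for the container and the request actor.
type apeCheck func(verb string) bool

// verifyClient mirrors the behaviour described in the commit message:
// containers created with a zero-filled basic ACL are treated as APE-governed,
// all others keep the legacy basic-ACL path.
func verifyClient(basicACL uint32, verb string, allowed apeCheck) error {
	if basicACL == 0 {
		if !allowed(verb) {
			return errors.New("ape: " + verb + " denied")
		}
		return nil
	}
	// Legacy basic-ACL verification would run here.
	return nil
}

func main() {
	onlyGet := apeCheck(func(verb string) bool { return verb == "ObjectGet" })
	fmt.Println(verifyClient(0, "ObjectGet", onlyGet)) // <nil>
	fmt.Println(verifyClient(0, "ObjectPut", onlyGet)) // ape: ObjectPut denied
}
```
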
2ecd427df4 [#1090] ape: Move ape request and resource implementations to common package
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-04-12 12:02:23 +03:00
573ca6d0d5 [#1090] go.mod: Update policy-engine version
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-04-12 12:02:18 +03:00
1eb47ab2ce [#1080] metabase: Add StorageID metric
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-04-10 10:00:58 +03:00
954881f1ef [#1080] metabase: Open bucket for container counter once
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-04-10 10:00:58 +03:00
7809928b64 [#1080] ape: Use value for APE request
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-04-09 18:55:27 +03:00
4b902be81e [#1080] ape: Do not read object headers before Head/Get
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-04-09 18:55:10 +03:00
161d33c2b7 [#1062] object: Fix buffer allocation for PayloadRange
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-04-09 13:10:41 +03:00
1a7c3db67f [#1077] objectsvc: Fix possible panic in GetRange()
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-05 16:13:09 +03:00
c1d90f018b [#1074] pilorama: Allow empty filenames in SortedByFilename()
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-04 10:27:45 +00:00
064e18b277 [#1074] pilorama: Remove debug print in tests
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-04 10:27:45 +00:00
21caa904f4 [#1072] Fix issue from govulncheck
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-04-04 11:37:53 +03:00
f74d058c2e [#1072] node, ir, morph: Set scope None when in upgrade mode
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-04-04 11:24:45 +03:00
c8ce6e9fe4 [#1072] node, ir: Add new config option kludge_compatibility_mode
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-04-04 11:24:37 +03:00
748da78dc7 [#1072] Fix gofumpt issues
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-04-04 11:24:27 +03:00
675 changed files with 10080 additions and 23084 deletions

View file

@ -1,4 +1,4 @@
FROM golang:1.22 AS builder
FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository

View file

@ -1,4 +1,4 @@
FROM golang:1.22
FROM golang:1.21
WORKDIR /tmp

View file

@ -1,4 +1,4 @@
FROM golang:1.22 AS builder
FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository

View file

@ -1,4 +1,4 @@
FROM golang:1.22 AS builder
FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository

View file

@ -1,4 +1,4 @@
FROM golang:1.22 AS builder
FROM golang:1.21 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository

View file

@ -8,7 +8,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.22', '1.23' ]
go_versions: [ '1.20', '1.21' ]
steps:
- uses: actions/checkout@v3

View file

@ -13,7 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.21'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3

View file

@ -1,25 +0,0 @@
name: Pre-commit hooks
on: [pull_request]
jobs:
precommit:
name: Pre-commit
env:
# Skip pre-commit hooks which are executed by other actions.
SKIP: make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt
runs-on: ubuntu-22.04
# If we use actions/setup-python from either Github or Gitea,
# the line above fails with a cryptic error about not being able to find python.
# So install everything manually.
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.23
- name: Set up Python
run: |
apt update
apt install -y pre-commit
- name: Run pre-commit
run: pre-commit run --color=always --hook-stage manual --all-files

View file

@ -11,7 +11,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
go-version: '1.21'
cache: true
- name: Install linters
@ -25,7 +25,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.22', '1.23' ]
go_versions: [ '1.20', '1.21' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@ -48,7 +48,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.21'
cache: true
- name: Run tests
@ -63,7 +63,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
go-version: '1.21'
cache: true
- name: Install staticcheck
@ -81,7 +81,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.21'
cache: true
- name: Install gopls
@ -89,23 +89,3 @@ jobs:
- name: Run gopls
run: make gopls-run
fumpt:
name: Run gofumpt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
cache: true
- name: Install gofumpt
run: make fumpt-install
- name: Run gofumpt
run: |
make fumpt
git diff --exit-code --quiet

View file

@ -13,7 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
go-version: '1.21'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest

View file

@ -12,8 +12,7 @@ run:
# output configuration options
output:
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
formats:
- format: tab
format: tab
# all available settings of specific linters
linters-settings:
@ -67,7 +66,7 @@ linters:
- bidichk
- durationcheck
- exhaustive
- copyloopvar
- exportloopref
- gofmt
- goimports
- misspell

View file

@ -16,7 +16,7 @@ repos:
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- id: end-of-file-fixer
exclude: "(.key|.svg)$"
exclude: ".key$"
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.9.0.6

View file

@ -9,74 +9,6 @@ Changelog for FrostFS Node
### Removed
### Updated
## [v0.42.0]
### Added
- Add audit logs for gRPC requests (#1184)
- Add CLI command to convert eACL to APE (#1189)
- Add `--await` flag to `control set-status` (#60)
- `app_info` metric for binary version (#1154)
- `--quiet` flag for healthcheck command (#1209)
### Changed
- Deprecate Container.SetEACL RPC (#1219)
### Fixed
- Take groups into account during APE processing (#1190)
- Handle double SIGHUP correctly (#1145)
- Handle empty filenames in tree listing (#1074)
- Handle duplicate tree nodes in the split-brain scenario (#1234, #1251)
- Remove APE pre-check in Object.GET/HEAD/RANGE RPC (#1249)
- Delete EC gc marks and split info (#1257)
- Do not search for non-existent objects on deletion (#1261)
### Updated
- Make putting EC chunks more robust (#1233)
## [v0.41.0]
### Added
- Support mTLS for morph client (#1170)
### Fixed
- Update shard state metric during shard init (#1174)
- Handle ENOSPC in blobovnicza (#1166)
- Handle multiple split-infos for EC objects (#1163)
- Set `Disabled` mode as the default for components (#1168)
## [v0.40.0]
### Added
- Support EC chunk reconstruction in policer (#1129)
- Support LOCK, DELETE and SEARCH methods on EC objects (#1147, 1144)
- apemanager service to manage APE chains (#1105)
### Fixed
- Properly verify GetRangeHash response (#1083)
- Send `MONOTONIC_USEC` in sdnotify on reload (#1135)
### Updated
- neo-go to `v0.106.0`
## [v0.39.0]
### Added
- Preliminary erasure coding support (#1065, #1112, #1103, #1120)
- TTL cache for blobovnicza tree (#1004)
- Cache for frostfsid and policy contracts (#1117)
- Writecache path to metric labels (#966)
- Documentation for authentication mechanisms (#1097, #1104)
- Metrics for metabase resync status (#1029)
### Changed
- Speed up metabase resync (#1024)
### Fixed
- Possible panic in GET_RANGE (#1077)
### Updated
- Minimum required Go version to 1.21
## [v0.38.0]
### Added

View file

@ -4,19 +4,20 @@ SHELL = bash
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_IMAGE ?= truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
GO_VERSION ?= 1.21
LINT_VERSION ?= 1.56.1
TRUECLOUDLAB_LINT_VERSION ?= 0.0.5
PROTOC_VERSION ?= 25.0
PROTOC_GEN_GO_VERSION ?= $(shell go list -f '{{.Version}}' -m google.golang.org/protobuf)
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
PROTOC_OS_VERSION=osx-x86_64
ifeq ($(shell uname), Linux)
PROTOC_OS_VERSION=linux-x86_64
endif
STATICCHECK_VERSION ?= 2024.1.1
STATICCHECK_VERSION ?= 2023.1.6
ARCH = amd64
BIN = bin
@ -38,20 +39,14 @@ LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT
TMP_DIR := .cache
PROTOBUF_DIR ?= $(abspath $(BIN))/protobuf
PROTOC_DIR ?= $(PROTOBUF_DIR)/protoc-v$(PROTOC_VERSION)
PROTOC_GEN_GO_DIR ?= $(PROTOBUF_DIR)/protoc-gen-go-$(PROTOC_GEN_GO_VERSION)
PROTOGEN_FROSTFS_DIR ?= $(PROTOBUF_DIR)/protogen-$(PROTOGEN_FROSTFS_VERSION)
STATICCHECK_DIR ?= $(abspath $(BIN))/staticcheck
STATICCHECK_VERSION_DIR ?= $(STATICCHECK_DIR)/$(STATICCHECK_VERSION)
SOURCES = $(shell find . -type f -name "*.go" -print)
GOFUMPT_VERSION ?= v0.7.0
GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
GOPLS_VERSION ?= v0.15.1
GOPLS_DIR ?= $(abspath $(BIN))/gopls
GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
GOPLS_TEMP_FILE := $(shell mktemp)
FROSTFS_CONTRACTS_PATH=$(abspath ./../frostfs-contract)
LOCODE_DB_PATH=$(abspath ./.cache/locode_db)
@ -105,15 +100,17 @@ export-metrics: dep
# Regenerate proto files:
protoc:
@if [ ! -d "$(PROTOC_DIR)" ] || [ ! -d "$(PROTOGEN_FROSTFS_DIR)" ]; then \
@if [ ! -d "$(PROTOC_DIR)" ] || [ ! -d "$(PROTOC_GEN_GO_DIR)" ] || [ ! -d "$(PROTOGEN_FROSTFS_DIR)" ]; then \
make protoc-install; \
fi
@for f in `find . -type f -name '*.proto' -not -path './bin/*'`; do \
echo "⇒ Processing $$f "; \
$(PROTOC_DIR)/bin/protoc \
--proto_path=.:$(PROTOC_DIR)/include:/usr/local/include \
--plugin=protoc-gen-go=$(PROTOC_GEN_GO_DIR)/protoc-gen-go \
--plugin=protoc-gen-go-frostfs=$(PROTOGEN_FROSTFS_DIR)/protogen \
--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
--go_out=. --go_opt=paths=source_relative \
--go-grpc_opt=require_unimplemented_servers=false \
--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
done
@ -126,6 +123,8 @@ protoc-install:
@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
@rm $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip
@echo "⇒ Installing protoc-gen-go..."
@GOBIN=$(PROTOC_GEN_GO_DIR) go install -v google.golang.org/protobuf/...@$(PROTOC_GEN_GO_VERSION)
@echo "⇒ Instaling protogen FrostFS plugin..."
@GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen@$(PROTOGEN_FROSTFS_VERSION)
@ -163,19 +162,10 @@ imports:
@echo "⇒ Processing goimports check"
@goimports -w cmd/ pkg/ misc/
# Install gofumpt
fumpt-install:
@rm -rf $(GOFUMPT_DIR)
@mkdir $(GOFUMPT_DIR)
@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
# Run gofumpt
fumpt:
@if [ ! -d "$(GOFUMPT_VERSION_DIR)" ]; then \
make fumpt-install; \
fi
@echo "⇒ Processing gofumpt check"
$(GOFUMPT_VERSION_DIR)/gofumpt -l -w cmd/ pkg/ misc/
@gofumpt -l -w cmd/ pkg/ misc/
# Run Unit Test with go test
test: GOFLAGS ?= "-count=1"
@ -197,7 +187,7 @@ lint-install:
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:
@ -230,12 +220,9 @@ gopls-run:
@if [ ! -d "$(GOPLS_VERSION_DIR)" ]; then \
make gopls-install; \
fi
$(GOPLS_VERSION_DIR)/gopls check $(SOURCES) 2>&1 >$(GOPLS_TEMP_FILE)
@if [[ $$(wc -l < $(GOPLS_TEMP_FILE)) -ne 0 ]]; then \
cat $(GOPLS_TEMP_FILE); \
@if [[ $$(find . -type f -name "*.go" -print | xargs $(GOPLS_VERSION_DIR)/gopls check | tee /dev/tty | wc -l) -ne 0 ]]; then \
exit 1; \
fi
rm $(GOPLS_TEMP_FILE)
# Run linters in Docker
docker/lint:
@ -289,19 +276,15 @@ env-up: all
echo "Frostfs contracts not found"; exit 1; \
fi
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet.json --gas 10.0
@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
make locode-download; \
fi
mkdir -p ./$(TMP_DIR)/state
mkdir -p ./$(TMP_DIR)/storage
# Shutdown dev environment
env-down:
docker compose -f dev/docker-compose.yml down
docker volume rm -f frostfs-node_neo-go
rm -rf ./$(TMP_DIR)/state
rm -rf ./$(TMP_DIR)/storage
rm -f ./.cache/.frostfs-ir-state
rm -f ./.cache/.frostfs-node-state
rm -rf ./.cache/storage

View file

@ -7,8 +7,9 @@
</p>
---
[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-node)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-node)
![Release (latest)](https://git.frostfs.info/TrueCloudLab/frostfs-node/badges/release.svg)
[![Report](https://goreportcard.com/badge/github.com/TrueCloudLab/frostfs-node)](https://goreportcard.com/report/github.com/TrueCloudLab/frostfs-node)
![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/TrueCloudLab/frostfs-node?sort=semver)
![License](https://img.shields.io/github/license/TrueCloudLab/frostfs-node.svg?style=popout)
# Overview
@ -32,8 +33,8 @@ manipulate large amounts of data without paying a prohibitive price.
FrostFS has a native [gRPC API](https://git.frostfs.info/TrueCloudLab/frostfs-api) and has
protocol gateways for popular protocols such as [AWS
S3](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw),
[HTTP](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw),
S3](https://github.com/TrueCloudLab/frostfs-s3-gw),
[HTTP](https://github.com/TrueCloudLab/frostfs-http-gw),
[FUSE](https://wikipedia.org/wiki/Filesystem_in_Userspace) and
[sFTP](https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol) allowing
developers to integrate applications without rewriting their code.
@ -44,11 +45,11 @@ Now, we only support GNU/Linux on amd64 CPUs with AVX/AVX2 instructions. More
platforms will be officially supported after release `1.0`.
The latest version of frostfs-node works with frostfs-contract
[v0.19.2](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases/tag/v0.19.2).
[v0.16.0](https://github.com/TrueCloudLab/frostfs-contract/releases/tag/v0.16.0).
# Building
To make all binaries you need Go 1.22+ and `make`:
To make all binaries you need Go 1.20+ and `make`:
```
make all
```
@ -70,7 +71,7 @@ make docker/bin/frostfs-<name> # build a specific binary
## Docker images
To make docker images suitable for use in [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env/) use:
To make docker images suitable for use in [frostfs-dev-env](https://github.com/TrueCloudLab/frostfs-dev-env/) use:
```
make images
```
@ -124,7 +125,7 @@ the feature/topic you are going to implement.
# Credits
FrostFS is maintained by [True Cloud Lab](https://git.frostfs.info/TrueCloudLab/) with the help and
FrostFS is maintained by [True Cloud Lab](https://github.com/TrueCloudLab/) with the help and
contributions from community members.
Please see [CREDITS](CREDITS.md) for details.

View file

@ -1 +1 @@
v0.42.0
v0.38.0

View file

@ -56,8 +56,7 @@ credentials: # passwords for consensus node / alphabet wallets
#### Network deployment
- `generate-alphabet` generates a set of wallets for consensus and
Alphabet nodes. The list of the name for alphabet wallets(no gaps between names allowed, order is important):
- az, buky, vedi, glagoli, dobro, yest, zhivete, dzelo, zemlja, izhe, izhei, gerv, kako, ljudi, mislete, nash, on, pokoj, rtsi, slovo, tverdo, uk
Alphabet nodes.
- `init` initializes the sidechain by deploying smart contracts and
setting provided FrostFS network configuration.

View file

@ -9,8 +9,8 @@ related configuration details.
To follow this guide you need:
- latest released version of [neo-go](https://github.com/nspcc-dev/neo-go/releases) (v0.97.2 at the moment),
- latest released version of [frostfs-adm](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases) utility (v0.42.9 at the moment),
- latest released version of compiled [frostfs-contract](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases) (v0.19.2 at the moment).
- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases) utility (v0.25.1 at the moment),
- latest released version of compiled [frostfs-contract](https://github.com/TrueCloudLab/frostfs-contract/releases) (v0.11.0 at the moment).
## Step 1: Prepare network configuration
@ -34,8 +34,6 @@ alphabet-wallets: /home/user/deploy/alphabet-wallets
network:
max_object_size: 67108864
epoch_duration: 240
max_ec_data_count: 12
max_ec_parity_count: 4
fee:
candidate: 0
container: 0
@ -64,11 +62,6 @@ alphabet-wallets: /home/user/deploy/alphabet-wallets
wallet[0]: hunter2
```
This command generates wallets with the following names:
- az, buky, vedi, glagoli, dobro, yest, zhivete, dzelo, zemlja, izhe, izhei, gerv, kako, ljudi, mislete, nash, on, pokoj, rtsi, slovo, tverdo, uk
No gaps between names allowed, order is important.
Do not lose wallet files and network config. Store it in an encrypted backed up
storage.

View file

@ -26,8 +26,6 @@ const (
ContractsURLFlagDesc = "URL to archive with compiled FrostFS contracts"
EpochDurationInitFlag = "network.epoch_duration"
MaxObjectSizeInitFlag = "network.max_object_size"
MaxECDataCountFlag = "network.max_ec_data_count"
MaxECParityCounFlag = "network.max_ec_parity_count"
RefillGasAmountFlag = "gas"
StorageWalletFlag = "storage-wallet"
ContainerFeeInitFlag = "network.fee.container"

View file

@ -21,8 +21,6 @@ type configTemplate struct {
CandidateFee int
ContainerFee int
ContainerAliasFee int
MaxECDataCount int
MaxECParityCount int
WithdrawFee int
Glagolitics []string
HomomorphicHashDisabled bool
@ -33,8 +31,6 @@ alphabet-wallets: {{ .AlphabetDir}}
network:
max_object_size: {{ .MaxObjectSize}}
epoch_duration: {{ .EpochDuration}}
max_ec_data_count: {{ .MaxECDataCount}}
max_ec_parity_count: {{ .MaxECParityCount}}
homomorphic_hash_disabled: {{ .HomomorphicHashDisabled}}
fee:
candidate: {{ .CandidateFee}}
@ -110,8 +106,6 @@ func generateConfigExample(appDir string, credSize int) (string, error) {
tmpl := configTemplate{
Endpoint: "https://neo.rpc.node:30333",
MaxObjectSize: 67108864, // 64 MiB
MaxECDataCount: 12, // Tested with 16-node networks, assuming 12 data + 4 parity nodes.
MaxECParityCount: 4, // Maximum 4 parity chunks, typically <= 3 for most policies.
EpochDuration: 240, // 1 hour with 15s per block
HomomorphicHashDisabled: false, // object homomorphic hash is enabled
CandidateFee: 100_0000_0000, // 100.0 GAS (Fixed8)

View file

@ -27,8 +27,6 @@ func TestGenerateConfigExample(t *testing.T) {
require.Equal(t, "https://neo.rpc.node:30333", v.GetString("rpc-endpoint"))
require.Equal(t, filepath.Join(appDir, "alphabet-wallets"), v.GetString("alphabet-wallets"))
require.Equal(t, 67108864, v.GetInt("network.max_object_size"))
require.Equal(t, 12, v.GetInt("network.max_ec_data_count"))
require.Equal(t, 4, v.GetInt("network.max_ec_parity_count"))
require.Equal(t, 240, v.GetInt("network.epoch_duration"))
require.Equal(t, 10000000000, v.GetInt("network.fee.candidate"))
require.Equal(t, 1000, v.GetInt("network.fee.container"))

View file

@ -1,15 +0,0 @@
package metabase
import "github.com/spf13/cobra"
// RootCmd is a root command of config section.
var RootCmd = &cobra.Command{
Use: "metabase",
Short: "Section for metabase commands",
}
func init() {
RootCmd.AddCommand(UpgradeCmd)
initUpgradeCommand()
}

View file

@ -1,99 +0,0 @@
package metabase
import (
"errors"
"fmt"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
const (
pathFlag = "path"
noCompactFlag = "no-compact"
)
var errNoPathsFound = errors.New("no metabase paths found")
var path string
var UpgradeCmd = &cobra.Command{
Use: "upgrade",
Short: "Upgrade metabase to latest version",
RunE: upgrade,
}
func upgrade(cmd *cobra.Command, _ []string) error {
configFile, err := cmd.Flags().GetString(commonflags.ConfigFlag)
if err != nil {
return err
}
configDir, err := cmd.Flags().GetString(commonflags.ConfigDirFlag)
if err != nil {
return err
}
noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
var paths []string
if path != "" {
paths = append(paths, path)
}
appCfg := config.New(configFile, configDir, config.EnvPrefix)
if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
paths = append(paths, sc.Metabase().Path())
return nil
}); err != nil {
return fmt.Errorf("failed to get metabase paths: %w", err)
}
if len(paths) == 0 {
return errNoPathsFound
}
cmd.Println("found", len(paths), "metabases:")
for i, path := range paths {
cmd.Println(i+1, ":", path)
}
result := make(map[string]bool)
var resultGuard sync.Mutex
eg, ctx := errgroup.WithContext(cmd.Context())
for _, path := range paths {
eg.Go(func() error {
var success bool
cmd.Println("upgrading metabase", path, "...")
if err := meta.Upgrade(ctx, path, !noCompact, func(a ...any) {
cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
}); err != nil {
cmd.Println("error: failed to upgrade metabase", path, ":", err)
} else {
success = true
cmd.Println("metabase", path, "upgraded successfully")
}
resultGuard.Lock()
result[path] = success
resultGuard.Unlock()
return nil
})
}
if err := eg.Wait(); err != nil {
return err
}
for mb, ok := range result {
if ok {
cmd.Println(mb, ": success")
} else {
cmd.Println(mb, ": failed")
}
}
return nil
}
func initUpgradeCommand() {
flags := UpgradeCmd.Flags()
flags.StringVar(&path, pathFlag, "", "Path to metabase file")
flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file")
}
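
Judging only from the command and flag definitions shown above, this removed subcommand would have been invoked roughly as `frostfs-adm metabase upgrade --path <metabase-file> --no-compact` (the `frostfs-adm` binary name is an assumption here, not shown in the diff); per the code, an explicit `--path` is optional, and metabase paths are also collected from every shard in the node configuration.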

View file

@ -16,8 +16,6 @@ import (
const (
namespaceTarget = "namespace"
containerTarget = "container"
userTarget = "user"
groupTarget = "group"
jsonFlag = "json"
jsonFlagDesc = "Output rule chains in JSON format"
chainIDFlag = "chain-id"

View file

@ -38,12 +38,6 @@ var (
func parseTarget(cmd *cobra.Command) policyengine.Target {
name, _ := cmd.Flags().GetString(targetNameFlag)
typ, err := parseTargetType(cmd)
// interpret "root" namespace as empty
if typ == policyengine.Namespace && name == "root" {
name = ""
}
commonCmd.ExitOnErr(cmd, "read target type error: %w", err)
return policyengine.Target{
@ -59,10 +53,6 @@ func parseTargetType(cmd *cobra.Command) (policyengine.TargetType, error) {
return policyengine.Namespace, nil
case containerTarget:
return policyengine.Container, nil
case userTarget:
return policyengine.User, nil
case groupTarget:
return policyengine.Group, nil
}
return -1, errUnknownTargetType
}
@ -104,16 +94,6 @@ func parseChainName(cmd *cobra.Command) apechain.Name {
return apeChainName
}
// invokerAdapter adapats invoker.Invoker to ContractStorageInvoker interface.
type invokerAdapter struct {
*invoker.Invoker
rpcActor invoker.RPCInvoke
}
func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
return n.rpcActor
}
func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
@ -121,18 +101,13 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag
inv := invoker.New(c, nil)
var ch util.Uint160
r := management.NewReader(inv)
nnsCs, err := helper.GetContractByID(r, 1)
nnsCs, err := r.GetContractByID(1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
invokerAdapter := &invokerAdapter{
Invoker: inv,
rpcActor: c,
}
return morph.NewContractStorageReader(invokerAdapter, ch), inv
return morph.NewContractStorageReader(inv, ch), inv
}
func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
@ -144,7 +119,7 @@ func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *he
var ch util.Uint160
r := management.NewReader(ac.Invoker)
nnsCs, err := helper.GetContractByID(r, 1)
nnsCs, err := r.GetContractByID(1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
ch, err = helper.NNSResolveHash(ac.Invoker, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))

View file

@ -60,7 +60,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
if dumpStorage || dumpAlphabet || dumpProxy {
r := management.NewReader(inv)
nnsCs, err = helper.GetContractByID(r, 1)
nnsCs, err = r.GetContractByID(1)
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}

View file

@ -34,7 +34,7 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
inv := invoker.New(c, nil)
r := management.NewReader(inv)
cs, err := helper.GetContractByID(r, 1)
cs, err := r.GetContractByID(1)
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}
@ -60,8 +60,7 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
switch k {
case netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
nbuf := make([]byte, 8)
copy(nbuf[:], v)
n := binary.LittleEndian.Uint64(nbuf)
@ -93,7 +92,7 @@ func SetConfigCmd(cmd *cobra.Command, args []string) error {
}
r := management.NewReader(wCtx.ReadOnlyInvoker)
cs, err := helper.GetContractByID(r, 1)
cs, err := r.GetContractByID(1)
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}
@ -104,22 +103,14 @@ func SetConfigCmd(cmd *cobra.Command, args []string) error {
}
forceFlag, _ := cmd.Flags().GetBool(forceConfigSet)
bw := io.NewBufBinWriter()
prm := make(map[string]any)
for _, arg := range args {
k, v, err := parseConfigPair(arg, forceFlag)
if err != nil {
return err
}
prm[k] = v
}
if err := validateConfig(prm, forceFlag); err != nil {
return err
}
for k, v := range prm {
// In NeoFS this is done via Notary contract. Here, however, we can form the
// transaction locally. The first `nil` argument is required only for notary
// disabled environment which is not supported by that command.
@ -137,50 +128,6 @@ func SetConfigCmd(cmd *cobra.Command, args []string) error {
return wCtx.AwaitTx()
}
const maxECSum = 256
func validateConfig(args map[string]any, forceFlag bool) error {
var sumEC int64
_, okData := args[netmap.MaxECDataCountConfig]
_, okParity := args[netmap.MaxECParityCountConfig]
if okData != okParity {
return fmt.Errorf("both %s and %s must be present in the configuration",
netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig)
}
for k, v := range args {
switch k {
case netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
value, ok := v.(int64)
if !ok {
return fmt.Errorf("%s has an invalid type. Expected type: int", k)
}
if value < 0 {
return fmt.Errorf("%s must be >= 0, got %v", k, v)
}
if k == netmap.MaxECDataCountConfig || k == netmap.MaxECParityCountConfig {
sumEC += value
}
case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
_, ok := v.(bool)
if !ok {
return fmt.Errorf("%s has an invalid type. Expected type: bool", k)
}
}
}
if sumEC > maxECSum && !forceFlag {
return fmt.Errorf("the sum of %s and %s must be <= %d, got %d",
netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig, maxECSum, sumEC)
}
return nil
}
func parseConfigPair(kvStr string, force bool) (key string, val any, err error) {
k, v, found := strings.Cut(kvStr, "=")
if !found {
@ -193,8 +140,7 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error)
switch key {
case netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig,
netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig:
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
val, err = strconv.ParseInt(valRaw, 10, 64)
if err != nil {
err = fmt.Errorf("could not parse %s's value '%s' as int: %w", key, valRaw, err)

View file

@ -1,34 +0,0 @@
package config
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/stretchr/testify/require"
)
func Test_ValidateConfig(t *testing.T) {
testArgs := make(map[string]any)
testArgs[netmap.MaxECDataCountConfig] = int64(11)
require.Error(t, validateConfig(testArgs, false))
testArgs[netmap.MaxECParityCountConfig] = int64(256)
require.Error(t, validateConfig(testArgs, false))
require.NoError(t, validateConfig(testArgs, true))
testArgs[netmap.MaxECParityCountConfig] = int64(-1)
require.Error(t, validateConfig(testArgs, false))
testArgs[netmap.MaxECParityCountConfig] = int64(55)
require.NoError(t, validateConfig(testArgs, false))
testArgs[netmap.HomomorphicHashingDisabledKey] = "1"
require.Error(t, validateConfig(testArgs, false))
testArgs[netmap.HomomorphicHashingDisabledKey] = true
require.NoError(t, validateConfig(testArgs, false))
testArgs["not-well-known-configuration-key"] = "key"
require.NoError(t, validateConfig(testArgs, false))
}

View file

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"os"
"slices"
"sort"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@ -34,7 +33,7 @@ func getContainerContractHash(cmd *cobra.Command, inv *invoker.Invoker) (util.Ui
}
if err != nil {
r := management.NewReader(inv)
nnsCs, err := helper.GetContractByID(r, 1)
nnsCs, err := r.GetContractByID(1)
if err != nil {
return util.Uint160{}, fmt.Errorf("can't get NNS contract state: %w", err)
}
@ -304,7 +303,7 @@ func parseContainers(filename string) ([]Container, error) {
func fetchContainerContractHash(wCtx *helper.InitializeContext) (util.Uint160, error) {
r := management.NewReader(wCtx.ReadOnlyInvoker)
nnsCs, err := helper.GetContractByID(r, 1)
nnsCs, err := r.GetContractByID(1)
if err != nil {
return util.Uint160{}, fmt.Errorf("can't get NNS contract state: %w", err)
}
@ -447,7 +446,7 @@ func getCIDFilterFunc(cmd *cobra.Command) (func([]byte) bool, error) {
var id cid.ID
id.SetSHA256(v)
idStr := id.EncodeToString()
_, found := slices.BinarySearch(rawIDs, idStr)
return found
n := sort.Search(len(rawIDs), func(i int) bool { return rawIDs[i] >= idStr })
return n < len(rawIDs) && rawIDs[n] == idStr
}, nil
}

View file

@ -79,7 +79,7 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
}
r := management.NewReader(c.ReadOnlyInvoker)
nnsCs, err := helper.GetContractByID(r, 1)
nnsCs, err := r.GetContractByID(1)
if err != nil {
return fmt.Errorf("can't fetch NNS contract state: %w", err)
}

View file

@ -42,7 +42,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
}
r := management.NewReader(invoker.New(c, nil))
cs, err := helper.GetContractByID(r, 1)
cs, err := r.GetContractByID(1)
if err != nil {
return err
}
@ -68,7 +68,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
if irSize != 0 {
bw.Reset()
for i := range irSize {
for i := 0; i < irSize; i++ {
emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly,
helper.GetAlphabetNNSDomain(i),
int64(nns.TXT))
@ -79,7 +79,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't fetch info from NNS: %w", err)
}
for i := range irSize {
for i := 0; i < irSize; i++ {
info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)}
if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil {
info.hash = h

View file

@ -3,32 +3,24 @@ package frostfsid
import (
"errors"
"fmt"
"math/big"
"sort"
frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
frostfsidrpclient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const iteratorBatchSize = 1
const (
namespaceFlag = "namespace"
subjectNameFlag = "subject-name"
@ -202,7 +194,6 @@ func initFrostfsIDCreateGroupCmd() {
frostfsidCreateGroupCmd.Flags().String(namespaceFlag, "", "Namespace where create group")
frostfsidCreateGroupCmd.Flags().String(groupNameFlag, "", "Group name, must be unique in namespace")
frostfsidCreateGroupCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
_ = frostfsidCreateGroupCmd.MarkFlagRequired(groupNameFlag)
}
func initFrostfsIDDeleteGroupCmd() {
@ -258,15 +249,12 @@ func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) {
}
func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
inv, _, hash := initInvoker(cmd)
reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListNamespaces()
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
ffsid, err := newFrostfsIDClient(cmd)
commonCmd.ExitOnErr(cmd, "init contract invoker: %w", err)
namespaces, err := ffsid.roCli.ListNamespaces()
commonCmd.ExitOnErr(cmd, "list namespaces: %w", err)
namespaces, err := frostfsidclient.ParseNamespaces(items)
commonCmd.ExitOnErr(cmd, "can't parse namespace: %w", err)
sort.Slice(namespaces, func(i, j int) bool { return namespaces[i].Name < namespaces[j].Name })
for _, namespace := range namespaces {
@ -307,15 +295,14 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) {
}
func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
ns := getFrostfsIDNamespace(cmd)
inv, _, hash := initInvoker(cmd)
reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListNamespaceSubjects(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
ffsid, err := newFrostfsIDClient(cmd)
commonCmd.ExitOnErr(cmd, "init contract invoker: %w", err)
subAddresses, err := ffsid.roCli.ListNamespaceSubjects(ns)
commonCmd.ExitOnErr(cmd, "list subjects: %w", err)
sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
@ -325,14 +312,8 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
continue
}
sessionID, it, err := reader.ListSubjects()
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subj, err := frostfsidclient.ParseSubject(items)
commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
subj, err := ffsid.roCli.GetSubject(addr)
commonCmd.ExitOnErr(cmd, "get subject: %w", err)
cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name)
}
@ -367,17 +348,13 @@ func frostfsidDeleteGroup(cmd *cobra.Command, _ []string) {
}
func frostfsidListGroups(cmd *cobra.Command, _ []string) {
inv, _, hash := initInvoker(cmd)
ns := getFrostfsIDNamespace(cmd)
reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListGroups(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
ffsid, err := newFrostfsIDClient(cmd)
commonCmd.ExitOnErr(cmd, "init contract invoker: %w", err)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
groups, err := frostfsidclient.ParseGroups(items)
commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)
groups, err := ffsid.roCli.ListGroups(ns)
commonCmd.ExitOnErr(cmd, "list groups: %w", err)
sort.Slice(groups, func(i, j int) bool { return groups[i].Name < groups[j].Name })
@ -416,19 +393,12 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
ns := getFrostfsIDNamespace(cmd)
groupID := getFrostfsIDGroupID(cmd)
includeNames, _ := cmd.Flags().GetBool(includeNamesFlag)
inv, cs, hash := initInvoker(cmd)
_, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
ffsid, err := newFrostfsIDClient(cmd)
commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)
commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
subjects, err := ffsid.roCli.ListGroupSubjects(ns, groupID)
commonCmd.ExitOnErr(cmd, "list group subjects: %w", err)
sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) })
@ -438,10 +408,9 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
continue
}
items, err := reader.GetSubject(subjAddr)
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
subj, err := frostfsidclient.ParseSubject(items)
commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err)
subj, err := ffsid.roCli.GetSubject(subjAddr)
commonCmd.ExitOnErr(cmd, "get subject: %w", err)
cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name)
}
}
@ -456,11 +425,11 @@ type frostfsidClient struct {
func newFrostfsIDClient(cmd *cobra.Command) (*frostfsidClient, error) {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
return nil, fmt.Errorf("can't initialize context: %w", err)
return nil, fmt.Errorf("can't to initialize context: %w", err)
}
r := management.NewReader(wCtx.ReadOnlyInvoker)
cs, err := helper.GetContractByID(r, 1)
cs, err := r.GetContractByID(1)
if err != nil {
return nil, fmt.Errorf("can't get NNS contract info: %w", err)
}
@ -504,35 +473,3 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
f.wCtx.Command.Println("Waiting for transactions to persist...")
return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
}
func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
var shouldStop bool
res := make([]stackitem.Item, 0)
for !shouldStop {
items, err := inv.TraverseIterator(sessionID, iter, batchSize)
if err != nil {
return nil, err
}
res = append(res, items...)
shouldStop = len(items) < batchSize
}
return res, nil
}
func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) {
c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
inv := invoker.New(c, nil)
r := management.NewReader(inv)
cs, err := r.GetContractByID(1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract info: %w", err)
nmHash, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract))
commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
return inv, cs, nmHash
}

View file

@ -65,47 +65,41 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
pubs := make(keys.PublicKeys, size)
passwords := make([]string, size)
var errG errgroup.Group
for i := range wallets {
password, err := config.GetPassword(v, innerring.GlagoliticLetter(i).String())
if err != nil {
return nil, fmt.Errorf("can't fetch password: %w", err)
}
errG.Go(func() error {
p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
f, err := os.OpenFile(p, os.O_CREATE, 0o644)
if err != nil {
return fmt.Errorf("can't create wallet file: %w", err)
}
if err := f.Close(); err != nil {
return fmt.Errorf("can't close wallet file: %w", err)
}
w, err := wallet.NewWallet(p)
if err != nil {
return fmt.Errorf("can't create wallet: %w", err)
}
if err := w.CreateAccount(constants.SingleAccountName, password); err != nil {
return fmt.Errorf("can't create account: %w", err)
}
p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
f, err := os.OpenFile(p, os.O_CREATE, 0o644)
if err != nil {
return nil, fmt.Errorf("can't create wallet file: %w", err)
}
if err := f.Close(); err != nil {
return nil, fmt.Errorf("can't close wallet file: %w", err)
}
w, err := wallet.NewWallet(p)
if err != nil {
return nil, fmt.Errorf("can't create wallet: %w", err)
}
if err := w.CreateAccount(constants.SingleAccountName, password); err != nil {
return nil, fmt.Errorf("can't create account: %w", err)
}
passwords[i] = password
wallets[i] = w
pubs[i] = w.Accounts[0].PrivateKey().PublicKey()
return nil
})
passwords[i] = password
wallets[i] = w
pubs[i] = w.Accounts[0].PrivateKey().PublicKey()
}
if err := errG.Wait(); err != nil {
return nil, err
}
var errG errgroup.Group
// Create committee account with N/2+1 multi-signature.
majCount := smartcontract.GetMajorityHonestNodeCount(size)
// Create consensus account with 2*N/3+1 multi-signature.
bftCount := smartcontract.GetDefaultHonestNodeCount(size)
for i := range wallets {
i := i
ps := pubs.Copy()
errG.Go(func() error {
if err := addMultisigAccount(wallets[i], majCount, constants.CommitteeAccountName, passwords[i], ps); err != nil {

View file

@ -23,6 +23,8 @@ import (
)
func TestGenerateAlphabet(t *testing.T) {
const size = 4
walletDir := t.TempDir()
buf := setupTestTerminal(t)
@ -53,13 +55,13 @@ func TestGenerateAlphabet(t *testing.T) {
t.Run("no password for contract group wallet", func(t *testing.T) {
buf.Reset()
v.Set(commonflags.AlphabetWalletsFlag, walletDir)
require.NoError(t, cmd.Flags().Set(commonflags.AlphabetSizeFlag, "1"))
buf.WriteString("pass\r")
require.NoError(t, cmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10)))
for i := uint64(0); i < size; i++ {
buf.WriteString(strconv.FormatUint(i, 10) + "\r")
}
require.Error(t, AlphabetCreds(cmd, nil))
})
const size = 4
buf.Reset()
v.Set(commonflags.AlphabetWalletsFlag, walletDir)
require.NoError(t, GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10)))

View file

@ -23,10 +23,9 @@ import (
// LocalActor is a kludge, do not use it outside of the morph commands.
type LocalActor struct {
neoActor *actor.Actor
accounts []*wallet.Account
Invoker *invoker.Invoker
rpcInvoker invoker.RPCInvoke
neoActor *actor.Actor
accounts []*wallet.Account
Invoker *invoker.Invoker
}
// NewLocalActor create LocalActor with accounts form provided wallets.
@ -69,10 +68,9 @@ func NewLocalActor(cmd *cobra.Command, c actor.RPCActor) (*LocalActor, error) {
}
}
return &LocalActor{
neoActor: act,
accounts: accounts,
Invoker: &act.Invoker,
rpcInvoker: c,
neoActor: act,
accounts: accounts,
Invoker: &act.Invoker,
}, nil
}
@ -169,7 +167,3 @@ func (a *LocalActor) MakeUnsignedRun(_ []byte, _ []transaction.Attribute) (*tran
func (a *LocalActor) MakeCall(_ util.Uint160, _ string, _ ...any) (*transaction.Transaction, error) {
panic("unimplemented")
}
func (a *LocalActor) GetRPCInvoker() invoker.RPCInvoke {
return a.rpcInvoker
}

View file

@ -15,7 +15,7 @@ import (
func getFrostfsIDAdminFromContract(roInvoker *invoker.Invoker) (util.Uint160, bool, error) {
r := management.NewReader(roInvoker)
cs, err := GetContractByID(r, 1)
cs, err := r.GetContractByID(1)
if err != nil {
return util.Uint160{}, false, fmt.Errorf("get nns contract: %w", err)
}
@ -62,7 +62,7 @@ func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any
// In case if NNS is updated multiple times, we can't calculate
// it's actual hash based on local data, thus query chain.
r := management.NewReader(c.ReadOnlyInvoker)
nnsCs, err := GetContractByID(r, 1)
nnsCs, err := r.GetContractByID(1)
if err != nil {
return nil, fmt.Errorf("get nns contract: %w", err)
}

View file

@ -44,6 +44,7 @@ type LocalClient struct {
transactions []*transaction.Transaction
dumpPath string
accounts []*wallet.Account
maxGasInvoke int64
}
func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet, dumpPath string) (*LocalClient, error) {
@ -103,9 +104,10 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
}
return &LocalClient{
bc: bc,
dumpPath: dumpPath,
accounts: accounts[:m],
bc: bc,
dumpPath: dumpPath,
accounts: accounts[:m],
maxGasInvoke: 15_0000_0000,
}, nil
}
@ -113,7 +115,7 @@ func (l *LocalClient) GetBlockCount() (uint32, error) {
return l.bc.BlockHeight(), nil
}
func (l *LocalClient) GetNativeContracts() ([]state.Contract, error) {
func (l *LocalClient) GetNativeContracts() ([]state.NativeContract, error) {
return l.bc.GetNatives(), nil
}
@ -224,7 +226,7 @@ func (l *LocalClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, e
paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}}
} else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok {
paramz = make([]manifest.Parameter, nSigs)
for j := range nSigs {
for j := 0; j < nSigs; j++ {
paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType}
}
}

View file

@ -2,7 +2,6 @@ package helper
import (
"context"
"crypto/tls"
"errors"
"fmt"
"time"
@ -28,7 +27,7 @@ type Client interface {
invoker.RPCInvoke
GetBlockCount() (uint32, error)
GetNativeContracts() ([]state.Contract, error)
GetNativeContracts() ([]state.NativeContract, error)
GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error)
GetVersion() (*result.Version, error)
SendRawTransaction(*transaction.Transaction) (util.Uint256, error)
@ -61,23 +60,9 @@ func GetN3Client(v *viper.Viper) (Client, error) {
if endpoint == "" {
return nil, errors.New("missing endpoint")
}
var cfg *tls.Config
if rootCAs := v.GetStringSlice("tls.trusted_ca_list"); len(rootCAs) != 0 {
certFile := v.GetString("tls.certificate")
keyFile := v.GetString("tls.key")
tlsConfig, err := rpcclient.TLSClientConfig(rootCAs, certFile, keyFile)
if err != nil {
return nil, err
}
cfg = tlsConfig
}
c, err := rpcclient.New(ctx, endpoint, rpcclient.Options{
MaxConnsPerHost: maxConnsPerHost,
RequestTimeout: requestTimeout,
TLSClientConfig: cfg,
})
if err != nil {
return nil, err

View file

@ -35,8 +35,6 @@ func GetDefaultNetmapContractConfigMap() map[string]any {
m := make(map[string]any)
m[netmap.EpochDurationConfig] = viper.GetInt64(commonflags.EpochDurationInitFlag)
m[netmap.MaxObjectSizeConfig] = viper.GetInt64(commonflags.MaxObjectSizeInitFlag)
m[netmap.MaxECDataCountConfig] = viper.GetInt64(commonflags.MaxECDataCountFlag)
m[netmap.MaxECParityCountConfig] = viper.GetInt64(commonflags.MaxECParityCounFlag)
m[netmap.ContainerFeeConfig] = viper.GetInt64(commonflags.ContainerFeeInitFlag)
m[netmap.ContainerAliasFeeConfig] = viper.GetInt64(commonflags.ContainerAliasFeeInitFlag)
m[netmap.IrCandidateFeeConfig] = viper.GetInt64(commonflags.CandidateFeeInitFlag)
@ -89,7 +87,7 @@ func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.
func GetNetConfigFromNetmapContract(roInvoker *invoker.Invoker) ([]stackitem.Item, error) {
r := management.NewReader(roInvoker)
cs, err := GetContractByID(r, 1)
cs, err := r.GetContractByID(1)
if err != nil {
return nil, fmt.Errorf("get nns contract: %w", err)
}

View file

@ -14,12 +14,10 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/viper"
)
@ -42,48 +40,45 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
return nil, fmt.Errorf("can't read alphabet wallets dir: %w", err)
}
var wallets []*wallet.Wallet
var letter string
for i := range constants.MaxAlphabetNodes {
letter = innerring.GlagoliticLetter(i).String()
p := filepath.Join(walletDir, letter+".json")
var w *wallet.Wallet
w, err = wallet.NewWalletFromFile(p)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
err = nil
} else {
err = fmt.Errorf("can't open wallet: %w", err)
var size int
loop:
for i := 0; i < len(walletFiles); i++ {
name := innerring.GlagoliticLetter(i).String() + ".json"
for j := range walletFiles {
if walletFiles[j].Name() == name {
size++
continue loop
}
break
}
break
}
if size == 0 {
return nil, errors.New("alphabet wallets dir is empty (run `generate-alphabet` command first)")
}
wallets := make([]*wallet.Wallet, size)
for i := 0; i < size; i++ {
letter := innerring.GlagoliticLetter(i).String()
p := filepath.Join(walletDir, letter+".json")
w, err := wallet.NewWalletFromFile(p)
if err != nil {
return nil, fmt.Errorf("can't open wallet: %w", err)
}
var password string
password, err = config.GetPassword(v, letter)
password, err := config.GetPassword(v, letter)
if err != nil {
err = fmt.Errorf("can't fetch password: %w", err)
break
return nil, fmt.Errorf("can't fetch password: %w", err)
}
for i := range w.Accounts {
if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
err = fmt.Errorf("can't unlock wallet: %w", err)
break
if err := w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
return nil, fmt.Errorf("can't unlock wallet: %w", err)
}
}
wallets = append(wallets, w)
}
if err != nil {
return nil, fmt.Errorf("can't read wallet for letter '%s': %w", letter, err)
}
if len(wallets) == 0 {
err = errors.New("there are no alphabet wallets in dir (run `generate-alphabet` command first)")
if len(walletFiles) > 0 {
err = fmt.Errorf("use glagolitic names for wallets(run `print-alphabet`): %w", err)
}
return nil, err
wallets[i] = w
}
return wallets, nil
}
@ -127,11 +122,11 @@ func readContractsFromArchive(file io.Reader, names []string) (map[string]*Contr
}
r := tar.NewReader(gr)
var h *tar.Header
for h, err = r.Next(); err == nil && h != nil; h, err = r.Next() {
if h.Typeflag != tar.TypeReg {
continue
for h, err := r.Next(); ; h, err = r.Next() {
if err != nil {
break
}
dir, _ := filepath.Split(h.Name)
ctrName := filepath.Base(dir)
@ -154,9 +149,6 @@ func readContractsFromArchive(file io.Reader, names []string) (map[string]*Contr
}
m[ctrName] = cs
}
if err != nil && err != io.EOF {
return nil, fmt.Errorf("can't read contracts from archive: %w", err)
}
for ctrName, cs := range m {
if cs.RawNEF == nil {
@ -183,18 +175,3 @@ func ParseGASAmount(s string) (fixedn.Fixed8, error) {
}
return gasAmount, nil
}
// GetContractByID retrieves a contract by its ID using the standard GetContractByID method.
// However, if the returned state.Contract is nil, it returns an error indicating that the contract was not found.
// See https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1210
func GetContractByID(r *management.ContractReader, id int32) (*state.Contract, error) {
cs, err := r.GetContractByID(id)
if err != nil {
return nil, err
}
if cs == nil {
return nil, errors.New("contract not found")
}
return cs, nil
}

View file

@ -21,7 +21,7 @@ import (
func setNNS(c *helper.InitializeContext) error {
r := management.NewReader(c.ReadOnlyInvoker)
nnsCs, err := helper.GetContractByID(r, 1)
nnsCs, err := r.GetContractByID(1)
if err != nil {
return err
}

View file

@ -100,7 +100,10 @@ func registerCandidates(c *helper.InitializeContext) error {
// Register candidates in batches in order to overcome the signers amount limit.
// See: https://github.com/nspcc-dev/neo-go/blob/master/pkg/core/transaction/transaction.go#L27
for i := 0; i < need; i += registerBatchSize {
start, end := i, min(i+registerBatchSize, need)
start, end := i, i+registerBatchSize
if end > need {
end = need
}
// This check is sound because transactions are accepted/rejected atomically.
if have >= end {
continue

View file

@ -113,7 +113,7 @@ func generateTestData(dir string, size int) error {
}
var pubs []string
for i := range size {
for i := 0; i < size; i++ {
p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json")
w, err := wallet.NewWalletFromFile(p)
if err != nil {
@ -148,7 +148,7 @@ func generateTestData(dir string, size int) error {
}
func setTestCredentials(v *viper.Viper, size int) {
for i := range size {
for i := 0; i < size; i++ {
v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10))
}
v.Set("credentials.contract", constants.TestContractPassword)

View file

@ -26,10 +26,6 @@ const (
initialAlphabetGASAmount = 10_000 * native.GASFactor
// initialProxyGASAmount represents the amount of GAS given to a proxy contract.
initialProxyGASAmount = 50_000 * native.GASFactor
// alphabetGasRatio is a coefficient that defines the threshold below which
// the balance of the alphabet node is considered not replenished. The value
// of this coefficient is determined empirically.
alphabetGasRatio = 5
)
func transferFunds(c *helper.InitializeContext) error {
@ -84,7 +80,7 @@ func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
res, err := r.BalanceOf(acc.Contract.ScriptHash())
return res.Cmp(big.NewInt(alphabetGasRatio*native.GASFactor)) == 1, err
return res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) == 1, err
}
func transferGASToProxy(c *helper.InitializeContext) error {
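Either variant reports the transfer as finished only when the alphabet account balance strictly exceeds the chosen threshold; the two lines differ only in how that threshold constant is derived. A runnable sketch of the comparison itself, with placeholder numbers:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        balance := big.NewInt(600)   // hypothetical account balance
        threshold := big.NewInt(500) // stands in for either threshold constant above
        // big.Int.Cmp returns 1 only when balance is strictly greater than threshold.
        fmt.Println(balance.Cmp(threshold) == 1) // true
    }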

View file

@ -25,8 +25,6 @@ var Cmd = &cobra.Command{
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.EpochDurationInitFlag, cmd.Flags().Lookup(epochDurationCLIFlag))
_ = viper.BindPFlag(commonflags.MaxObjectSizeInitFlag, cmd.Flags().Lookup(maxObjectSizeCLIFlag))
_ = viper.BindPFlag(commonflags.MaxECDataCountFlag, cmd.Flags().Lookup(commonflags.MaxECDataCountFlag))
_ = viper.BindPFlag(commonflags.MaxECParityCounFlag, cmd.Flags().Lookup(commonflags.MaxECParityCounFlag))
_ = viper.BindPFlag(commonflags.HomomorphicHashDisabledInitFlag, cmd.Flags().Lookup(homomorphicHashDisabledCLIFlag))
_ = viper.BindPFlag(commonflags.CandidateFeeInitFlag, cmd.Flags().Lookup(candidateFeeCLIFlag))
_ = viper.BindPFlag(commonflags.ContainerFeeInitFlag, cmd.Flags().Lookup(containerFeeCLIFlag))

View file

@ -15,11 +15,11 @@ import (
func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("can't initialize context: %w", err)
return fmt.Errorf("can't to initialize context: %w", err)
}
r := management.NewReader(wCtx.ReadOnlyInvoker)
cs, err := helper.GetContractByID(r, 1)
cs, err := r.GetContractByID(1)
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}

View file

@ -19,7 +19,7 @@ func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) {
inv := invoker.New(c, nil)
r := management.NewReader(inv)
cs, err := helper.GetContractByID(r, 1)
cs, err := r.GetContractByID(1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract info: %w", err)
nmHash, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.NetmapContract))

View file

@ -19,7 +19,7 @@ func getRPCClient(cmd *cobra.Command) (*client.Contract, *helper.LocalActor, uti
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
r := management.NewReader(ac.Invoker)
nnsCs, err := helper.GetContractByID(r, 1)
nnsCs, err := r.GetContractByID(1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
return client.New(ac, nnsCs.Hash), ac, nnsCs.Hash
}

View file

@ -37,7 +37,7 @@ func RemoveNodesCmd(cmd *cobra.Command, args []string) error {
defer wCtx.Close()
r := management.NewReader(wCtx.ReadOnlyInvoker)
cs, err := helper.GetContractByID(r, 1)
cs, err := r.GetContractByID(1)
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}

View file

@ -30,7 +30,7 @@ var errInvalidParameterFormat = errors.New("invalid parameter format, must be Pa
func SetPolicyCmd(cmd *cobra.Command, args []string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("can't initialize context: %w", err)
return fmt.Errorf("can't to initialize context: %w", err)
}
bw := io.NewBufBinWriter()
@ -48,7 +48,7 @@ func SetPolicyCmd(cmd *cobra.Command, args []string) error {
value, err := strconv.ParseUint(v, 10, 32)
if err != nil {
return fmt.Errorf("can't parse parameter value '%s': %w", args[i], err)
return fmt.Errorf("can't parse parameter value '%s': %w", args[1], err)
}
emit.AppCall(bw.BinWriter, policy.Hash, "set"+k, callflag.All, int64(value))

View file

@ -39,11 +39,11 @@ func removeProxyAccount(cmd *cobra.Command, _ []string) {
func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("can't initialize context: %w", err)
return fmt.Errorf("can't to initialize context: %w", err)
}
r := management.NewReader(wCtx.ReadOnlyInvoker)
cs, err := helper.GetContractByID(r, 1)
cs, err := r.GetContractByID(1)
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}

View file

@ -5,7 +5,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
@ -42,7 +41,6 @@ func init() {
rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd)
rootCmd.AddCommand(storagecfg.RootCmd)
rootCmd.AddCommand(metabase.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))

View file

@ -61,8 +61,6 @@ storage:
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
opened_cache_ttl: 5m # ttl for opened database file
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
gc:
remover_batch_size: 200 # number of objects to be removed by the garbage collector

View file

@ -2,13 +2,10 @@ package internal
import (
"bytes"
"cmp"
"context"
"errors"
"fmt"
"io"
"os"
"slices"
"sort"
"strings"
@ -17,6 +14,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -191,6 +189,54 @@ func DeleteContainer(ctx context.Context, prm DeleteContainerPrm) (res DeleteCon
return
}
// EACLPrm groups parameters of EACL operation.
type EACLPrm struct {
Client *client.Client
ClientParams client.PrmContainerEACL
}
// EACLRes groups the resulting values of EACL operation.
type EACLRes struct {
cliRes *client.ResContainerEACL
}
// EACL returns requested eACL table.
func (x EACLRes) EACL() eacl.Table {
return x.cliRes.Table()
}
// EACL reads eACL table from FrostFS by container ID.
//
// Returns any error which prevented the operation from completing correctly in error return.
func EACL(ctx context.Context, prm EACLPrm) (res EACLRes, err error) {
res.cliRes, err = prm.Client.ContainerEACL(ctx, prm.ClientParams)
return
}
// SetEACLPrm groups parameters of SetEACL operation.
type SetEACLPrm struct {
Client *client.Client
ClientParams client.PrmContainerSetEACL
}
// SetEACLRes groups the resulting values of SetEACL operation.
type SetEACLRes struct{}
// SetEACL requests to save an eACL table in FrostFS.
//
// Operation is asynchronous and not guaranteed even in the absence of errors.
// The required time is also not predictable.
//
// Success can be verified by reading by container identifier.
//
// Returns any error which prevented the operation from completing correctly in error return.
func SetEACL(ctx context.Context, prm SetEACLPrm) (res SetEACLRes, err error) {
_, err = prm.Client.ContainerSetEACL(ctx, prm.ClientParams)
return
}
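A sketch of how these wrappers are driven from the container get-eacl command that reappears later in this diff; cli is the SDK client and id the parsed container ID of the surrounding command:

    eaclPrm := internalclient.EACLPrm{
        Client:       cli,
        ClientParams: client.PrmContainerEACL{ContainerID: &id},
    }
    res, err := internalclient.EACL(cmd.Context(), eaclPrm)
    commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
    eaclTable := res.EACL() // eacl.Table as stored in FrostFS
    _ = eaclTable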
// NetworkInfoPrm groups parameters of NetworkInfo operation.
type NetworkInfoPrm struct {
Client *client.Client
@ -666,7 +712,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
for {
n, ok = rdr.Read(buf)
for i := range n {
for i := 0; i < n; i++ {
list = append(list, buf[i])
}
if !ok {
@ -846,65 +892,3 @@ func SyncContainerSettings(ctx context.Context, prm SyncContainerPrm) (*SyncCont
return new(SyncContainerRes), nil
}
// PatchObjectPrm groups parameters of PatchObject operation.
type PatchObjectPrm struct {
commonObjectPrm
objectAddressPrm
NewAttributes []objectSDK.Attribute
ReplaceAttribute bool
PayloadPatches []PayloadPatch
}
type PayloadPatch struct {
Range objectSDK.Range
PayloadPath string
}
type PatchRes struct {
OID oid.ID
}
func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
patchPrm := client.PrmObjectPatch{
XHeaders: prm.xHeaders,
BearerToken: prm.bearerToken,
Session: prm.sessionToken,
Address: prm.objAddr,
}
slices.SortFunc(prm.PayloadPatches, func(a, b PayloadPatch) int {
return cmp.Compare(a.Range.GetOffset(), b.Range.GetOffset())
})
patcher, err := prm.cli.ObjectPatchInit(ctx, patchPrm)
if err != nil {
return nil, fmt.Errorf("init payload reading: %w", err)
}
if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) {
for _, pp := range prm.PayloadPatches {
payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm)
if err != nil {
return nil, err
}
applied := patcher.PatchPayload(ctx, &pp.Range, payloadFile)
_ = payloadFile.Close()
if !applied {
break
}
}
}
res, err := patcher.Close(ctx)
if err != nil {
return nil, err
}
return &PatchRes{
OID: res.ObjectID(),
}, nil
}
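Payload patches are applied in ascending offset order, which is what the slices.SortFunc call above guarantees. A standalone, runnable sketch of that sort with a simplified stand-in for PayloadPatch:

    package main

    import (
        "cmp"
        "fmt"
        "slices"
    )

    type patch struct{ offset uint64 }

    func main() {
        pp := []patch{{30}, {10}, {20}}
        slices.SortFunc(pp, func(a, b patch) int {
            return cmp.Compare(a.offset, b.offset) // ascending by range offset
        })
        fmt.Println(pp) // [{10} {20} {30}]
    }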

View file

@ -11,9 +11,9 @@ import (
// values and their usage descriptions.
const (
GenerateKey = "generate-key"
GenerateKeyShorthand = "g"
GenerateKeyDefault = false
GenerateKeyUsage = "Generate new private key"
generateKeyShorthand = "g"
generateKeyDefault = false
generateKeyUsage = "Generate new private key"
WalletPath = "wallet"
WalletPathShorthand = "w"
@ -50,13 +50,6 @@ const (
TracingFlag = "trace"
TracingFlagUsage = "Generate trace ID and print it."
AwaitFlag = "await"
AwaitFlagUsage = "Wait for the operation to complete"
QuietFlag = "quiet"
QuietFlagShorthand = "q"
QuietFlagUsage = "Print nothing and exit with non-zero code on failure"
)
// Init adds common flags to the command:
@ -79,7 +72,7 @@ func Init(cmd *cobra.Command) {
func InitWithoutRPC(cmd *cobra.Command) {
ff := cmd.Flags()
ff.BoolP(GenerateKey, GenerateKeyShorthand, GenerateKeyDefault, GenerateKeyUsage)
ff.BoolP(GenerateKey, generateKeyShorthand, generateKeyDefault, generateKeyUsage)
ff.StringP(WalletPath, WalletPathShorthand, WalletPathDefault, WalletPathUsage)
ff.StringP(Account, AccountShorthand, AccountDefault, AccountUsage)
}

View file

@ -24,8 +24,6 @@ var testCmd = &cobra.Command{
}
func Test_getOrGenerate(t *testing.T) {
t.Cleanup(viper.Reset)
dir := t.TempDir()
wallPath := filepath.Join(dir, "wallet.json")

View file

@ -1,139 +0,0 @@
package apemanager
import (
"encoding/hex"
"errors"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
const (
chainIDFlag = "chain-id"
chainIDHexFlag = "chain-id-hex"
ruleFlag = "rule"
pathFlag = "path"
)
const (
targetNameFlag = "target-name"
targetNameDesc = "Resource name in APE resource name format"
targetTypeFlag = "target-type"
targetTypeDesc = "Resource type(container/namespace)"
)
const (
defaultNamespace = ""
namespaceTarget = "namespace"
containerTarget = "container"
userTarget = "user"
groupTarget = "group"
)
var errUnknownTargetType = errors.New("unknown target type")
var addCmd = &cobra.Command{
Use: "add",
Short: "Add rule chain for a target",
Run: add,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
func parseTarget(cmd *cobra.Command) (ct apeSDK.ChainTarget) {
typ, _ := cmd.Flags().GetString(targetTypeFlag)
name, _ := cmd.Flags().GetString(targetNameFlag)
ct.Name = name
switch typ {
case namespaceTarget:
ct.TargetType = apeSDK.TargetTypeNamespace
case containerTarget:
var cnr cid.ID
commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name))
ct.TargetType = apeSDK.TargetTypeContainer
case userTarget:
ct.TargetType = apeSDK.TargetTypeUser
case groupTarget:
ct.TargetType = apeSDK.TargetTypeGroup
default:
commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
}
return ct
}
func parseChain(cmd *cobra.Command) apeSDK.Chain {
chainID, _ := cmd.Flags().GetString(chainIDFlag)
hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
chainIDRaw := []byte(chainID)
if hexEncoded {
var err error
chainIDRaw, err = hex.DecodeString(chainID)
commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
}
chain := new(apechain.Chain)
chain.ID = apechain.ID(chainIDRaw)
if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
commonCmd.ExitOnErr(cmd, "parser error: %w", util.ParseAPEChain(chain, rules))
} else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", util.ParseAPEChainBinaryOrJSON(chain, encPath))
} else {
commonCmd.ExitOnErr(cmd, "parser error: %w", errors.New("rule is not passed"))
}
cmd.Println("Parsed chain:")
util.PrintHumanReadableAPEChain(cmd, chain)
serialized := chain.Bytes()
return apeSDK.Chain{
Raw: serialized,
}
}
func add(cmd *cobra.Command, _ []string) {
c := parseChain(cmd)
target := parseTarget(cmd)
key := key.Get(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC)
res, err := cli.APEManagerAddChain(cmd.Context(), client_sdk.PrmAPEManagerAddChain{
ChainTarget: target,
Chain: c,
})
commonCmd.ExitOnErr(cmd, "add chain error: %w", err)
cmd.Println("Rule has been added.")
cmd.Println("Chain ID: ", string(res.ChainID))
}
func initAddCmd() {
commonflags.Init(addCmd)
ff := addCmd.Flags()
ff.StringArray(ruleFlag, []string{}, "Rule statement")
ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format")
ff.String(chainIDFlag, "", "Assign ID to the parsed chain")
ff.String(targetNameFlag, "", targetNameDesc)
ff.String(targetTypeFlag, "", targetTypeDesc)
_ = addCmd.MarkFlagRequired(targetTypeFlag)
ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
addCmd.MarkFlagsMutuallyExclusive(pathFlag, ruleFlag)
}
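When the chain-id-hex flag is set, parseChain above decodes the chain ID from a hex string before wrapping it in apechain.ID; otherwise the raw bytes of the string are used. A standalone sketch of just that decoding step:

    package main

    import (
        "encoding/hex"
        "fmt"
    )

    func main() {
        chainID := "636861696e2d6964" // "chain-id" encoded as hex
        raw, err := hex.DecodeString(chainID)
        if err != nil {
            panic(err) // the CLI reports this via ExitOnErr instead
        }
        fmt.Println(string(raw)) // chain-id
    }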

View file

@ -1,49 +0,0 @@
package apemanager
import (
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
var listCmd = &cobra.Command{
Use: "list",
Short: "List rule chains defined on target",
Run: list,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
func list(cmd *cobra.Command, _ []string) {
target := parseTarget(cmd)
key := key.Get(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC)
resp, err := cli.APEManagerListChains(cmd.Context(),
client_sdk.PrmAPEManagerListChains{
ChainTarget: target,
})
commonCmd.ExitOnErr(cmd, "list chains call error: %w", err)
for _, respChain := range resp.Chains {
var chain apechain.Chain
commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(respChain.Raw))
apeutil.PrintHumanReadableAPEChain(cmd, &chain)
}
}
func initListCmd() {
commonflags.Init(listCmd)
ff := listCmd.Flags()
ff.String(targetNameFlag, "", targetNameDesc)
ff.String(targetTypeFlag, "", targetTypeDesc)
_ = listCmd.MarkFlagRequired(targetTypeFlag)
}

View file

@ -1,66 +0,0 @@
package apemanager
import (
"encoding/hex"
"errors"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
var (
errEmptyChainID = errors.New("chain id cannot be empty")
removeCmd = &cobra.Command{
Use: "remove",
Short: "Remove rule chain for a target",
Run: remove,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
)
func remove(cmd *cobra.Command, _ []string) {
target := parseTarget(cmd)
key := key.Get(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC)
chainID, _ := cmd.Flags().GetString(chainIDFlag)
if chainID == "" {
commonCmd.ExitOnErr(cmd, "read chain id error: %w", errEmptyChainID)
}
chainIDRaw := []byte(chainID)
hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag)
if hexEncoded {
var err error
chainIDRaw, err = hex.DecodeString(chainID)
commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err)
}
_, err := cli.APEManagerRemoveChain(cmd.Context(), client_sdk.PrmAPEManagerRemoveChain{
ChainTarget: target,
ChainID: chainIDRaw,
})
commonCmd.ExitOnErr(cmd, "remove chain error: %w", err)
cmd.Println("\nRule has been removed.")
}
func initRemoveCmd() {
commonflags.Init(removeCmd)
ff := removeCmd.Flags()
ff.String(targetNameFlag, "", targetNameDesc)
ff.String(targetTypeFlag, "", targetTypeDesc)
_ = removeCmd.MarkFlagRequired(targetTypeFlag)
ff.String(chainIDFlag, "", "Chain id")
ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
}

View file

@ -1,21 +0,0 @@
package apemanager
import (
"github.com/spf13/cobra"
)
var Cmd = &cobra.Command{
Use: "ape-manager",
Short: "Operations with APE manager",
Long: `Operations with APE manager`,
}
func init() {
Cmd.AddCommand(addCmd)
Cmd.AddCommand(removeCmd)
Cmd.AddCommand(listCmd)
initAddCmd()
initRemoveCmd()
initListCmd()
}

View file

@ -15,12 +15,10 @@ import (
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
eaclFlag = "eacl"
apeFlag = "ape"
issuedAtFlag = "issued-at"
notValidBeforeFlag = "not-valid-before"
ownerFlag = "owner"
@ -39,17 +37,10 @@ In this case --` + commonflags.RPC + ` flag should be specified and the epoch in
is set to current epoch + n.
`,
Run: createToken,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
ff := cmd.Flags()
_ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath))
_ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account))
},
}
func init() {
createCmd.Flags().StringP(eaclFlag, "e", "", "Path to the extended ACL table (mutually exclusive with --impersonate and --ape flag)")
createCmd.Flags().StringP(apeFlag, "a", "", "Path to the JSON-encoded APE override (mutually exclusive with --impersonate and --eacl flag)")
createCmd.Flags().StringP(eaclFlag, "e", "", "Path to the extended ACL table (mutually exclusive with --impersonate flag)")
createCmd.Flags().StringP(issuedAtFlag, "i", "+0", "Epoch to issue token at")
createCmd.Flags().StringP(notValidBeforeFlag, "n", "+0", "Not valid before epoch")
createCmd.Flags().StringP(commonflags.ExpireAt, "x", "", "The last active epoch for the token")
@ -58,15 +49,13 @@ func init() {
createCmd.Flags().Bool(jsonFlag, false, "Output token in JSON")
createCmd.Flags().Bool(impersonateFlag, false, "Mark token as impersonate to consider the token signer as the request owner (mutually exclusive with --eacl flag)")
createCmd.Flags().StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage)
createCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)
createCmd.Flags().StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage)
createCmd.MarkFlagsMutuallyExclusive(eaclFlag, apeFlag, impersonateFlag)
createCmd.MarkFlagsMutuallyExclusive(eaclFlag, impersonateFlag)
_ = cobra.MarkFlagFilename(createCmd.Flags(), eaclFlag)
_ = cobra.MarkFlagFilename(createCmd.Flags(), apeFlag)
_ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.ExpireAt)
_ = cobra.MarkFlagRequired(createCmd.Flags(), ownerFlag)
_ = cobra.MarkFlagRequired(createCmd.Flags(), outFlag)
}
@ -107,16 +96,16 @@ func createToken(cmd *cobra.Command, _ []string) {
fmt.Errorf("expiration epoch is less than not-valid-before epoch: %d < %d", exp, nvb))
}
ownerStr, _ := cmd.Flags().GetString(ownerFlag)
var ownerID user.ID
commonCmd.ExitOnErr(cmd, "can't parse recipient: %w", ownerID.DecodeString(ownerStr))
var b bearer.Token
b.SetExp(exp)
b.SetNbf(nvb)
b.SetIat(iat)
if ownerStr, _ := cmd.Flags().GetString(ownerFlag); ownerStr != "" {
var ownerID user.ID
commonCmd.ExitOnErr(cmd, "can't parse recipient: %w", ownerID.DecodeString(ownerStr))
b.ForUser(ownerID)
}
b.ForUser(ownerID)
impersonate, _ := cmd.Flags().GetBool(impersonateFlag)
b.SetImpersonate(impersonate)
@ -130,14 +119,6 @@ func createToken(cmd *cobra.Command, _ []string) {
b.SetEACLTable(*table)
}
apePath, _ := cmd.Flags().GetString(apeFlag)
if apePath != "" {
var apeOverride bearer.APEOverride
raw, err := os.ReadFile(apePath)
commonCmd.ExitOnErr(cmd, "can't read APE rules: %w", err)
commonCmd.ExitOnErr(cmd, "can't parse APE rules: %w", json.Unmarshal(raw, &apeOverride))
b.SetAPEOverride(apeOverride)
}
var data []byte
toJSON, _ := cmd.Flags().GetBool(jsonFlag)

View file

@ -1,115 +0,0 @@
package bearer
import (
"errors"
"fmt"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"github.com/spf13/cobra"
)
var (
errChainIDCannotBeEmpty = errors.New("chain id cannot be empty")
errRuleIsNotParsed = errors.New("rule is not passed")
)
const (
chainIDFlag = "chain-id"
chainIDHexFlag = "chain-id-hex"
ruleFlag = "rule"
pathFlag = "path"
outputFlag = "output"
)
var generateAPEOverrideCmd = &cobra.Command{
Use: "generate-ape-override",
Short: "Generate APE override.",
Long: `Generate APE override by target and APE chains. Util command.
Generated APE override can be dumped to a file in JSON format that is passed to
"create" command.
`,
Run: genereateAPEOverride,
}
func genereateAPEOverride(cmd *cobra.Command, _ []string) {
c := parseChain(cmd)
targetCID, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var cid cidSDK.ID
commonCmd.ExitOnErr(cmd, "invalid cid format: %w", cid.DecodeString(targetCID))
override := &bearer.APEOverride{
Target: apeSDK.ChainTarget{
TargetType: apeSDK.TargetTypeContainer,
Name: targetCID,
},
Chains: []apeSDK.Chain{
{
Raw: c.Bytes(),
},
},
}
overrideMarshalled, err := override.MarshalJSON()
commonCmd.ExitOnErr(cmd, "failed to marshal APE override: %w", err)
outputPath, _ := cmd.Flags().GetString(outputFlag)
if outputPath != "" {
err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
commonCmd.ExitOnErr(cmd, "dump error: %w", err)
} else {
fmt.Print("\n")
fmt.Println(string(overrideMarshalled))
}
}
func init() {
ff := generateAPEOverrideCmd.Flags()
ff.StringP(commonflags.CIDFlag, "", "", "Target container ID.")
_ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.CIDFlag)
ff.StringArray(ruleFlag, []string{}, "Rule statement")
ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format")
ff.String(chainIDFlag, "", "Assign ID to the parsed chain")
ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex")
ff.String(outputFlag, "", "Output path to dump result JSON-encoded APE override")
_ = cobra.MarkFlagFilename(createCmd.Flags(), outputFlag)
}
func parseChainID(cmd *cobra.Command) apechain.ID {
chainID, _ := cmd.Flags().GetString(chainIDFlag)
if chainID == "" {
commonCmd.ExitOnErr(cmd, "read chain id error: %w",
errChainIDCannotBeEmpty)
}
return apechain.ID(chainID)
}
func parseChain(cmd *cobra.Command) *apechain.Chain {
chain := new(apechain.Chain)
if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 {
commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules))
} else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" {
commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath))
} else {
commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed)
}
chain.ID = parseChainID(cmd)
cmd.Println("Parsed chain:")
parseutil.PrintHumanReadableAPEChain(cmd, chain)
return chain
}

View file

@ -11,5 +11,4 @@ var Cmd = &cobra.Command{
func init() {
Cmd.AddCommand(createCmd)
Cmd.AddCommand(generateAPEOverrideCmd)
}

View file

@ -58,27 +58,13 @@ It will be stored in sidechain when inner ring will accepts it.`,
"use --force option to skip this check: %w", err)
for i, nodes := range nodesByRep {
if repNum := placementPolicy.ReplicaDescriptor(i).NumberOfObjects(); repNum > 0 {
if repNum > uint32(len(nodes)) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf(
"the number of nodes '%d' in selector is not enough for the number of replicas '%d', "+
"use --force option to skip this check",
len(nodes),
repNum,
))
}
} else if ecParts := placementPolicy.ReplicaDescriptor(i).TotalECPartCount(); ecParts > 0 {
if ecParts > uint32(len(nodes)) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf(
"the number of nodes '%d' in selector is not enough for EC placement '%d.%d', "+
"use --force option to skip this check",
len(nodes),
placementPolicy.ReplicaDescriptor(i).GetECDataCount(),
placementPolicy.ReplicaDescriptor(i).GetECParityCount(),
))
}
} else {
commonCmd.ExitOnErr(cmd, "%w", errors.New("no replication policy is set"))
if placementPolicy.ReplicaNumberByIndex(i) > uint32(len(nodes)) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf(
"the number of nodes '%d' in selector is not enough for the number of replicas '%d', "+
"use --force option to skip this check",
len(nodes),
placementPolicy.ReplicaNumberByIndex(i),
))
}
}
}
@ -139,7 +125,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
},
}
for range awaitTimeout {
for i := 0; i < awaitTimeout; i++ {
time.Sleep(1 * time.Second)
_, err := internalclient.GetContainer(cmd.Context(), getPrm)

View file

@ -110,7 +110,7 @@ Only owner of the container has a permission to remove container.`,
},
}
for range awaitTimeout {
for i := 0; i < awaitTimeout; i++ {
time.Sleep(1 * time.Second)
_, err := internalclient.GetContainer(cmd.Context(), getPrm)

View file

@ -0,0 +1,68 @@
package container
import (
"os"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
var getExtendedACLCmd = &cobra.Command{
Use: "get-eacl",
Short: "Get extended ACL table of container",
Long: `Get extended ACL table of container`,
Run: func(cmd *cobra.Command, _ []string) {
id := parseContainerID(cmd)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
eaclPrm := internalclient.EACLPrm{
Client: cli,
ClientParams: client.PrmContainerEACL{
ContainerID: &id,
},
}
res, err := internalclient.EACL(cmd.Context(), eaclPrm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
eaclTable := res.EACL()
if containerPathTo == "" {
cmd.Println("eACL: ")
common.PrettyPrintJSON(cmd, &eaclTable, "eACL")
return
}
var data []byte
if containerJSON {
data, err = eaclTable.MarshalJSON()
commonCmd.ExitOnErr(cmd, "can't encode to JSON: %w", err)
} else {
data, err = eaclTable.Marshal()
commonCmd.ExitOnErr(cmd, "can't encode to binary: %w", err)
}
cmd.Println("dumping data to file:", containerPathTo)
err = os.WriteFile(containerPathTo, data, 0o644)
commonCmd.ExitOnErr(cmd, "could not write eACL to file: %w", err)
},
}
func initContainerGetEACLCmd() {
commonflags.Init(getExtendedACLCmd)
flags := getExtendedACLCmd.Flags()
flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
flags.StringVar(&containerPathTo, "to", "", "Path to dump encoded container (default: binary encoded)")
flags.BoolVar(&containerJSON, commonflags.JSON, false, "Encode EACL table in json format")
}

View file

@ -1,6 +1,9 @@
package container
import (
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
@ -15,8 +18,6 @@ const (
flagListPrintAttr = "with-attr"
flagListContainerOwner = "owner"
flagListName = "name"
generateKeyContainerUsage = commonflags.GenerateKeyUsage + ", should be used with --owner flag"
)
// flag vars of list command.
@ -33,11 +34,6 @@ var listContainersCmd = &cobra.Command{
Run: func(cmd *cobra.Command, _ []string) {
var idUser user.ID
generateKey, _ := cmd.Flags().GetBool(commonflags.GenerateKey)
if flagVarListContainerOwner == "" && generateKey {
cmd.PrintErrln("WARN: using -g without --owner - output will be empty")
}
key := key.GetOrGenerate(cmd)
if flagVarListContainerOwner == "" {
@ -67,6 +63,7 @@ var listContainersCmd = &cobra.Command{
continue
}
cnrID := cnrID
prmGet.ClientParams.ContainerID = &cnrID
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
@ -81,8 +78,12 @@ var listContainersCmd = &cobra.Command{
cmd.Println(cnrID.String())
if flagVarListPrintAttr {
cnr.IterateUserAttributes(func(key, val string) {
cmd.Printf(" %s: %s\n", key, val)
cnr.IterateAttributes(func(key, val string) {
if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) {
// FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
// Use dedicated method to skip system attributes.
cmd.Printf(" %s: %s\n", key, val)
}
})
}
}
@ -103,5 +104,4 @@ func initContainerListContainersCmd() {
flags.BoolVar(&flagVarListPrintAttr, flagListPrintAttr, false,
"Request and print attributes of each container",
)
flags.Lookup(commonflags.GenerateKey).Usage = generateKeyContainerUsage
}
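The older listing code restored above prints attributes via IterateAttributes and skips system ones by prefix, instead of relying on IterateUserAttributes. A standalone sketch of that manual filter; the two prefix strings are assumed values for container.SysAttributePrefix and container.SysAttributePrefixNeoFS:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        attrs := map[string]string{
            "Name":           "my-container",
            "__SYSTEM__NAME": "internal", // hypothetical system attribute
        }
        sysPrefixes := []string{"__SYSTEM__", "__NEOFS__"} // assumed prefix values
        for k, v := range attrs {
            skip := false
            for _, p := range sysPrefixes {
                if strings.HasPrefix(k, p) {
                    skip = true
                    break
                }
            }
            if skip {
                continue // system attributes are not shown to the user
            }
            fmt.Printf("  %s: %s\n", k, v)
        }
    }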

View file

@ -2,7 +2,6 @@ package container
import (
"crypto/sha256"
"errors"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
@ -47,14 +46,7 @@ var containerNodesCmd = &cobra.Command{
commonCmd.ExitOnErr(cmd, "could not build container nodes for given container: %w", err)
for i := range cnrNodes {
if repNum := policy.ReplicaDescriptor(i).NumberOfObjects(); repNum > 0 {
cmd.Printf("Descriptor #%d, REP %d:\n", i+1, repNum)
} else if ecParts := policy.ReplicaDescriptor(i).TotalECPartCount(); ecParts > 0 {
cmd.Printf("Descriptor #%d, EC %d.%d:\n", i+1, policy.ReplicaDescriptor(i).GetECDataCount(),
policy.ReplicaDescriptor(i).GetECParityCount())
} else {
commonCmd.ExitOnErr(cmd, "%w", errors.New("no replication policy is set"))
}
cmd.Printf("Descriptor #%d, REP %d:\n", i+1, policy.ReplicaNumberByIndex(i))
for j := range cnrNodes[i] {
commonCmd.PrettyPrintNodeInfo(cmd, cnrNodes[i][j], j, "\t", short)
}

View file

@ -20,12 +20,14 @@ import (
type policyPlaygroundREPL struct {
cmd *cobra.Command
args []string
nodes map[string]netmap.NodeInfo
}
func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
func newPolicyPlaygroundREPL(cmd *cobra.Command, args []string) (*policyPlaygroundREPL, error) {
return &policyPlaygroundREPL{
cmd: cmd,
args: args,
nodes: map[string]netmap.NodeInfo{},
}, nil
}
@ -219,8 +221,8 @@ var policyPlaygroundCmd = &cobra.Command{
Short: "A REPL for testing placement policies",
Long: `A REPL for testing placement policies.
If a wallet and endpoint are provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.
Run: func(cmd *cobra.Command, _ []string) {
repl, err := newPolicyPlaygroundREPL(cmd)
Run: func(cmd *cobra.Command, args []string) {
repl, err := newPolicyPlaygroundREPL(cmd, args)
commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
},

View file

@ -25,6 +25,8 @@ func init() {
deleteContainerCmd,
listContainerObjectsCmd,
getContainerInfoCmd,
getExtendedACLCmd,
setExtendedACLCmd,
containerNodesCmd,
policyPlaygroundCmd,
}
@ -36,6 +38,8 @@ func init() {
initContainerDeleteCmd()
initContainerListObjectsCmd()
initContainerInfoCmd()
initContainerGetEACLCmd()
initContainerSetEACLCmd()
initContainerNodesCmd()
initContainerPolicyPlaygroundCmd()
@ -49,6 +53,7 @@ func init() {
}{
{createContainerCmd, "PUT"},
{deleteContainerCmd, "DELETE"},
{setExtendedACLCmd, "SETEACL"},
} {
commonflags.InitSession(el.cmd, "container "+el.verb)
}

View file

@ -0,0 +1,108 @@
package container
import (
"bytes"
"errors"
"time"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
var flagVarsSetEACL struct {
noPreCheck bool
srcPath string
}
var setExtendedACLCmd = &cobra.Command{
Use: "set-eacl",
Short: "Set new extended ACL table for container",
Long: `Set new extended ACL table for container.
Container ID in EACL table will be substituted with ID from the CLI.`,
Run: func(cmd *cobra.Command, _ []string) {
id := parseContainerID(cmd)
eaclTable := common.ReadEACL(cmd, flagVarsSetEACL.srcPath)
tok := getSession(cmd)
eaclTable.SetCID(id)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
if !flagVarsSetEACL.noPreCheck {
cmd.Println("Checking the ability to modify access rights in the container...")
extendable, err := internalclient.IsACLExtendable(cmd.Context(), cli, id)
commonCmd.ExitOnErr(cmd, "Extensibility check failure: %w", err)
if !extendable {
commonCmd.ExitOnErr(cmd, "", errors.New("container ACL is immutable"))
}
cmd.Println("ACL extension is enabled in the container, continue processing.")
}
setEACLPrm := internalclient.SetEACLPrm{
Client: cli,
ClientParams: client.PrmContainerSetEACL{
Table: eaclTable,
Session: tok,
},
}
_, err := internalclient.SetEACL(cmd.Context(), setEACLPrm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
if containerAwait {
exp, err := eaclTable.Marshal()
commonCmd.ExitOnErr(cmd, "broken EACL table: %w", err)
cmd.Println("awaiting...")
getEACLPrm := internalclient.EACLPrm{
Client: cli,
ClientParams: client.PrmContainerEACL{
ContainerID: &id,
},
}
for i := 0; i < awaitTimeout; i++ {
time.Sleep(1 * time.Second)
res, err := internalclient.EACL(cmd.Context(), getEACLPrm)
if err == nil {
// compare binary values because EACL could have been set already
table := res.EACL()
got, err := table.Marshal()
if err != nil {
continue
}
if bytes.Equal(exp, got) {
cmd.Println("EACL has been persisted on sidechain")
return
}
}
}
commonCmd.ExitOnErr(cmd, "", errSetEACLTimeout)
}
},
}
func initContainerSetEACLCmd() {
commonflags.Init(setExtendedACLCmd)
flags := setExtendedACLCmd.Flags()
flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
flags.StringVar(&flagVarsSetEACL.srcPath, "table", "", "path to file with JSON or binary encoded EACL table")
flags.BoolVar(&containerAwait, "await", false, "block execution until EACL is persisted")
flags.BoolVar(&flagVarsSetEACL.noPreCheck, "no-precheck", false, "do not pre-check the extensibility of the container ACL")
}
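The await branch above declares success once the binary encoding of the eACL read back from the network equals the one that was submitted. A stripped-down, runnable sketch of that poll-and-compare loop; fetch is a hypothetical stand-in for the EACL read plus Marshal:

    package main

    import (
        "bytes"
        "fmt"
        "time"
    )

    func fetch() ([]byte, error) { return []byte("eacl"), nil } // hypothetical read-back

    func main() {
        exp := []byte("eacl") // marshalled table that was submitted
        const awaitTimeout = 5
        for i := 0; i < awaitTimeout; i++ {
            time.Sleep(10 * time.Millisecond) // the real command sleeps 1s per attempt
            if got, err := fetch(); err == nil && bytes.Equal(exp, got) {
                fmt.Println("EACL has been persisted on sidechain")
                return
            }
        }
        fmt.Println("timeout: EACL has not been persisted on sidechain")
    }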

View file

@ -18,8 +18,9 @@ const (
)
var (
errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain")
errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain")
errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain")
errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain")
errSetEACLTimeout = errors.New("timeout: EACL has not been persisted on sidechain")
)
func parseContainerID(cmd *cobra.Command) cid.ID {

View file

@ -24,7 +24,7 @@ var addRuleCmd = &cobra.Command{
Long: "Add local APE rule to a node with following format:\n<action>[:action_detail] <operation> [<condition1> ...] <resource>",
Example: `control add-rule --endpoint ... -w ... --address ... --chain-id ChainID --cid ... --rule "allow Object.Get *"
--rule "deny Object.Get EbxzAdz5LB4uqxuz6crWKAumBNtZyK2rKsqQP7TdZvwr/*"
--rule "deny:QuotaLimitReached Object.Put ResourceCondition:Department=HR *"
--rule "deny:QuotaLimitReached Object.Put Object.Resource:Department=HR *"
control add-rule --endpoint ... -w ... --address ... --chain-id ChainID --cid ... --path some_chain.json
`,

View file

@ -1,10 +1,7 @@
package control
import (
"os"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
@ -27,7 +24,6 @@ func initControlHealthCheckCmd() {
flags := healthCheckCmd.Flags()
flags.Bool(healthcheckIRFlag, false, "Communicate with IR node")
flags.BoolP(commonflags.QuietFlag, commonflags.QuietFlagShorthand, false, commonflags.QuietFlagUsage)
_ = flags.MarkDeprecated(healthcheckIRFlag, "for health check of inner ring nodes, use the 'control ir healthcheck' command instead.")
}
@ -54,12 +50,6 @@ func healthCheck(cmd *cobra.Command, args []string) {
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
if quietFlag, _ := cmd.Flags().GetBool(commonflags.QuietFlag); quietFlag {
if resp.GetBody().GetHealthStatus() == control.HealthStatus_READY {
return
}
os.Exit(1)
}
cmd.Printf("Network status: %s\n", resp.GetBody().GetNetmapStatus())
cmd.Printf("Health status: %s\n", resp.GetBody().GetHealthStatus())

View file

@ -1,10 +1,7 @@
package control
import (
"os"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
@ -21,8 +18,6 @@ var irHealthCheckCmd = &cobra.Command{
func initControlIRHealthCheckCmd() {
initControlFlags(irHealthCheckCmd)
flags := irHealthCheckCmd.Flags()
flags.BoolP(commonflags.QuietFlag, commonflags.QuietFlagShorthand, false, commonflags.QuietFlagUsage)
}
func irHealthCheck(cmd *cobra.Command, _ []string) {
@ -44,12 +39,6 @@ func irHealthCheck(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
if quietFlag, _ := cmd.Flags().GetBool(commonflags.QuietFlag); quietFlag {
if resp.GetBody().GetHealthStatus() == ircontrol.HealthStatus_READY {
return
}
os.Exit(1)
}
cmd.Printf("Health status: %s\n", resp.GetBody().GetHealthStatus())
}

View file

@ -27,8 +27,6 @@ const (
defaultNamespace = "root"
namespaceTarget = "namespace"
containerTarget = "container"
userTarget = "user"
groupTarget = "group"
)
const (
@ -68,16 +66,6 @@ func parseTarget(cmd *cobra.Command) *control.ChainTarget {
Name: name,
Type: control.ChainTarget_CONTAINER,
}
case userTarget:
return &control.ChainTarget{
Name: name,
Type: control.ChainTarget_USER,
}
case groupTarget:
return &control.ChainTarget{
Name: name,
Type: control.ChainTarget_GROUP,
}
default:
commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType)
}

View file

@ -13,8 +13,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const (
@ -54,10 +52,6 @@ func listTargets(cmd *cobra.Command, _ []string) {
resp, err = control.ListTargetsLocalOverrides(client, req)
return err
})
if err != nil && status.Code(err) == codes.NotFound {
cmd.Println("Local overrides are not defined for any target.")
return
}
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())

View file

@ -1,88 +0,0 @@
package control
import (
"fmt"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
const (
fillPercentFlag = "fill_percent"
)
var shardsRebuildCmd = &cobra.Command{
Use: "rebuild",
Short: "Rebuild shards",
Long: "Rebuild reclaims storage occupied by dead objects and adjusts the storage structure according to the configuration (for blobovnicza only now)",
Run: shardsRebuild,
}
func shardsRebuild(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
req := &control.StartShardRebuildRequest{
Body: &control.StartShardRebuildRequest_Body{
Shard_ID: getShardIDList(cmd),
TargetFillPercent: getFillPercentValue(cmd),
ConcurrencyLimit: getConcurrencyValue(cmd),
},
}
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var resp *control.StartShardRebuildResponse
var err error
err = cli.ExecRaw(func(client *rawclient.Client) error {
resp, err = control.StartShardRebuild(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
var success, failed uint
for _, res := range resp.GetBody().GetResults() {
if res.GetSuccess() {
success++
cmd.Printf("Shard %s: OK\n", base58.Encode(res.GetShard_ID()))
} else {
failed++
cmd.Printf("Shard %s: failed with error %q\n", base58.Encode(res.GetShard_ID()), res.GetError())
}
}
cmd.Printf("Total: %d success, %d failed\n", success, failed)
}
func getFillPercentValue(cmd *cobra.Command) uint32 {
v, _ := cmd.Flags().GetUint32(fillPercentFlag)
if v <= 0 || v > 100 {
commonCmd.ExitOnErr(cmd, "invalid fill_percent value", fmt.Errorf("fill_percent value must be (0, 100], current value: %d", v))
}
return v
}
func getConcurrencyValue(cmd *cobra.Command) uint32 {
v, _ := cmd.Flags().GetUint32(concurrencyFlag)
if v <= 0 || v > 10000 {
commonCmd.ExitOnErr(cmd, "invalid concurrency value", fmt.Errorf("concurrency value must be (0, 10 000], current value: %d", v))
}
return v
}
func initControlShardRebuildCmd() {
initControlFlags(shardsRebuildCmd)
flags := shardsRebuildCmd.Flags()
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
flags.Bool(shardAllFlag, false, "Process all shards")
flags.Uint32(fillPercentFlag, 80, "Target fill percent to reclaim space")
flags.Uint32(concurrencyFlag, 20, "Maximum count of concurrently rebuilding files")
setShardModeCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}

View file

@ -1,10 +1,7 @@
package control
import (
"crypto/ecdsa"
"errors"
"fmt"
"time"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
@ -12,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
@ -22,13 +18,8 @@ const (
netmapStatusOnline = "online"
netmapStatusOffline = "offline"
netmapStatusMaintenance = "maintenance"
maxSetStatusMaxWaitTime = 30 * time.Minute
setStatusWaitTimeout = 30 * time.Second
)
var errNetmapStatusAwaitFailed = errors.New("netmap status hasn't changed for 30 minutes")
var setNetmapStatusCmd = &cobra.Command{
Use: "set-status",
Short: "Set status of the storage node in FrostFS network map",
@ -52,8 +43,6 @@ func initControlSetNetmapStatusCmd() {
flags.BoolP(commonflags.ForceFlag, commonflags.ForceFlagShorthand, false,
"Force turning to local maintenance")
flags.Bool(commonflags.AwaitFlag, false, commonflags.AwaitFlagUsage)
}
func setNetmapStatus(cmd *cobra.Command, _ []string) {
@ -67,27 +56,22 @@ func setNetmapStatus(cmd *cobra.Command, _ []string) {
}
}
await, _ := cmd.Flags().GetBool(commonflags.AwaitFlag)
var targetStatus control.NetmapStatus
switch st, _ := cmd.Flags().GetString(netmapStatusFlag); st {
default:
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("unsupported status %s", st))
case netmapStatusOnline:
body.SetStatus(control.NetmapStatus_ONLINE)
printIgnoreForce(control.NetmapStatus_ONLINE)
targetStatus = control.NetmapStatus_ONLINE
case netmapStatusOffline:
body.SetStatus(control.NetmapStatus_OFFLINE)
printIgnoreForce(control.NetmapStatus_OFFLINE)
targetStatus = control.NetmapStatus_OFFLINE
case netmapStatusMaintenance:
body.SetStatus(control.NetmapStatus_MAINTENANCE)
if force {
body.SetForceMaintenance(true)
body.SetForceMaintenance()
common.PrintVerbose(cmd, "Local maintenance will be forced.")
}
targetStatus = control.NetmapStatus_MAINTENANCE
}
req := new(control.SetNetmapStatusRequest)
@ -108,52 +92,4 @@ func setNetmapStatus(cmd *cobra.Command, _ []string) {
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Network status update request successfully sent.")
if await {
awaitSetNetmapStatus(cmd, pk, cli, targetStatus)
}
}
func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client.Client, targetStatus control.NetmapStatus) {
req := &control.GetNetmapStatusRequest{
Body: &control.GetNetmapStatusRequest_Body{},
}
signRequest(cmd, pk, req)
var epoch uint64
var status control.NetmapStatus
startTime := time.Now()
cmd.Println("Wait until epoch and netmap status change...")
for {
var resp *control.GetNetmapStatusResponse
var err error
err = cli.ExecRaw(func(client *rawclient.Client) error {
resp, err = control.GetNetmapStatus(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err)
if epoch == 0 {
epoch = resp.GetBody().GetEpoch()
}
status = resp.GetBody().GetStatus()
if resp.GetBody().GetEpoch() > epoch {
epoch = resp.GetBody().GetEpoch()
cmd.Printf("Epoch changed to %d\n", resp.GetBody().GetEpoch())
}
if status == targetStatus {
break
}
if time.Since(startTime) > maxSetStatusMaxWaitTime {
commonCmd.ExitOnErr(cmd, "failed to wait netmap status: %w", errNetmapStatusAwaitFailed)
return
}
time.Sleep(setStatusWaitTimeout)
cmd.Printf("Current netmap status '%s', target status '%s'\n", status.String(), targetStatus.String())
}
cmd.Printf("Netmap status changed to '%s' successfully.\n", status.String())
}
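awaitSetNetmapStatus above is a poll-until-match loop bounded by maxSetStatusMaxWaitTime rather than by a fixed number of attempts. A compressed, runnable sketch of the same control flow; done is a hypothetical status check:

    package main

    import (
        "fmt"
        "time"
    )

    func done() bool { return false } // hypothetical "status == target" check

    func main() {
        const maxWait = 50 * time.Millisecond // stands in for maxSetStatusMaxWaitTime
        const step = 10 * time.Millisecond    // stands in for setStatusWaitTimeout
        start := time.Now()
        for {
            if done() {
                fmt.Println("target status reached")
                return
            }
            if time.Since(start) > maxWait {
                fmt.Println("gave up waiting for the status change")
                return
            }
            time.Sleep(step)
        }
    }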

View file

@ -19,7 +19,6 @@ func initControlShardsCmd() {
shardsCmd.AddCommand(doctorCmd)
shardsCmd.AddCommand(writecacheShardCmd)
shardsCmd.AddCommand(shardsDetachCmd)
shardsCmd.AddCommand(shardsRebuildCmd)
initControlShardsListCmd()
initControlSetShardModeCmd()
@ -29,5 +28,4 @@ func initControlShardsCmd() {
initControlDoctorCmd()
initControlShardsWritecacheCmd()
initControlShardsDetachCmd()
initControlShardRebuildCmd()
}

View file

@ -61,18 +61,17 @@ func listShards(cmd *cobra.Command, _ []string) {
}
}
func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) {
func prettyPrintShardsJSON(cmd *cobra.Command, ii []*control.ShardInfo) {
out := make([]map[string]any, 0, len(ii))
for _, i := range ii {
out = append(out, map[string]any{
"shard_id": base58.Encode(i.GetShard_ID()),
"mode": shardModeToString(i.GetMode()),
"metabase": i.GetMetabasePath(),
"blobstor": i.GetBlobstor(),
"writecache": i.GetWritecachePath(),
"pilorama": i.GetPiloramaPath(),
"error_count": i.GetErrorCount(),
"evacuation_in_progress": i.GetEvacuationInProgress(),
"shard_id": base58.Encode(i.GetShard_ID()),
"mode": shardModeToString(i.GetMode()),
"metabase": i.GetMetabasePath(),
"blobstor": i.GetBlobstor(),
"writecache": i.GetWritecachePath(),
"pilorama": i.GetPiloramaPath(),
"error_count": i.GetErrorCount(),
})
}
@ -81,10 +80,10 @@ func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) {
enc.SetIndent("", " ")
commonCmd.ExitOnErr(cmd, "cannot shard info to JSON: %w", enc.Encode(out))
cmd.Print(buf.String()) // pretty printer emits newline, so no need for Println
cmd.Print(buf.String()) // pretty printer emits newline, to no need for Println
}
func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) {
func prettyPrintShards(cmd *cobra.Command, ii []*control.ShardInfo) {
for _, i := range ii {
pathPrinter := func(name, path string) string {
if path == "" {
@ -106,8 +105,7 @@ func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) {
sb.String()+
pathPrinter("Write-cache", i.GetWritecachePath())+
pathPrinter("Pilorama", i.GetPiloramaPath())+
fmt.Sprintf("Error count: %d\n", i.GetErrorCount())+
fmt.Sprintf("Evacuation in progress: %t\n", i.GetEvacuationInProgress()),
fmt.Sprintf("Error count: %d\n", i.GetErrorCount()),
base58.Encode(i.GetShard_ID()),
shardModeToString(i.GetMode()),
)
@ -123,7 +121,7 @@ func shardModeToString(m control.ShardMode) string {
return "unknown"
}
func sortShardsByID(ii []control.ShardInfo) {
func sortShardsByID(ii []*control.ShardInfo) {
sort.Slice(ii, func(i, j int) bool {
return bytes.Compare(ii[i].GetShard_ID(), ii[j].GetShard_ID()) < 0
})

View file

@ -3,7 +3,7 @@ package control
import (
"bytes"
"fmt"
"slices"
"sort"
"strings"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
@ -117,10 +117,10 @@ func setShardMode(cmd *cobra.Command, _ []string) {
req.SetBody(body)
body.SetMode(mode)
body.SetShard_ID(getShardIDList(cmd))
body.SetShardIDList(getShardIDList(cmd))
reset, _ := cmd.Flags().GetBool(shardClearErrorsFlag)
body.SetResetErrorCounter(reset)
body.ClearErrorCounter(reset)
signRequest(cmd, pk, req)
@ -177,6 +177,9 @@ func getShardIDListFromIDFlag(cmd *cobra.Command, withAllFlag bool) [][]byte {
res = append(res, raw)
}
slices.SortFunc(res, bytes.Compare)
sort.Slice(res, func(i, j int) bool {
return bytes.Compare(res[i], res[j]) < 0
})
return res
}
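Both versions sort the collected shard IDs lexicographically; bytes.Compare already has the three-way comparator signature that slices.SortFunc (Go 1.21+) expects, so it can be passed directly instead of being wrapped for sort.Slice. A runnable illustration:

    package main

    import (
        "bytes"
        "fmt"
        "slices"
    )

    func main() {
        res := [][]byte{{0x02}, {0x01}, {0x03}}
        slices.SortFunc(res, bytes.Compare) // lexicographic, ascending
        fmt.Println(res)                    // [[1] [2] [3]]
    }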

View file

@ -9,7 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
)
@ -60,7 +60,7 @@ func synchronizeTree(cmd *cobra.Command, _ []string) {
},
}
err := ctrlmessage.Sign(pk, req)
err := controlSvc.SignMessage(pk, req)
commonCmd.ExitOnErr(cmd, "could not sign request: %w", err)
cli := getClient(cmd, pk)

View file

@ -8,7 +8,7 @@ import (
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
"github.com/spf13/cobra"
@ -33,8 +33,8 @@ func initControlIRFlags(cmd *cobra.Command) {
ff.Uint32(irFlagNameVUB, 0, "Valid until block value for notary transaction")
}
func signRequest(cmd *cobra.Command, pk *ecdsa.PrivateKey, req ctrlmessage.SignedMessage) {
err := ctrlmessage.Sign(pk, req)
func signRequest(cmd *cobra.Command, pk *ecdsa.PrivateKey, req controlSvc.SignedMessage) {
err := controlSvc.SignMessage(pk, req)
commonCmd.ExitOnErr(cmd, "could not sign request: %w", err)
}
@ -44,7 +44,7 @@ func verifyResponse(cmd *cobra.Command,
GetSign() []byte
},
body interface {
MarshalProtobuf([]byte) []byte
StableMarshal([]byte) []byte
},
) {
if sigControl == nil {
@ -60,7 +60,7 @@ func verifyResponse(cmd *cobra.Command,
var sig frostfscrypto.Signature
commonCmd.ExitOnErr(cmd, "can't read signature: %w", sig.ReadFromV2(sigV2))
if !sig.Verify(body.MarshalProtobuf(nil)) {
if !sig.Verify(body.StableMarshal(nil)) {
commonCmd.ExitOnErr(cmd, "", errors.New("invalid response signature"))
}
}

View file

@ -9,12 +9,6 @@ import (
"github.com/spf13/cobra"
)
const (
asyncFlag = "async"
restoreModeFlag = "restore-mode"
shrinkFlag = "shrink"
)
var writecacheShardCmd = &cobra.Command{
Use: "writecache",
Short: "Operations with storage node's write-cache",
@ -32,16 +26,10 @@ func sealWritecache(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
async, _ := cmd.Flags().GetBool(asyncFlag)
restoreMode, _ := cmd.Flags().GetBool(restoreModeFlag)
shrink, _ := cmd.Flags().GetBool(shrinkFlag)
req := &control.SealWriteCacheRequest{Body: &control.SealWriteCacheRequest_Body{
Shard_ID: getShardIDList(cmd),
IgnoreErrors: ignoreErrors,
Async: async,
RestoreMode: restoreMode,
Shrink: shrink,
}}
signRequest(cmd, pk, req)
@ -80,9 +68,6 @@ func initControlShardsWritecacheCmd() {
ff.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
ff.Bool(shardAllFlag, false, "Process all shards")
ff.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects")
ff.Bool(asyncFlag, false, "Run operation in background")
ff.Bool(restoreModeFlag, false, "Restore writecache's mode after sealing")
ff.Bool(shrinkFlag, false, "Shrink writecache's internal storage")
sealWritecacheShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}

View file

@ -43,8 +43,6 @@ var netInfoCmd = &cobra.Command{
cmd.Printf(format, "Epoch duration", netInfo.EpochDuration())
cmd.Printf(format, "Inner Ring candidate fee", netInfo.IRCandidateFee())
cmd.Printf(format, "Maximum object size", netInfo.MaxObjectSize())
cmd.Printf(format, "Maximum count of data shards", netInfo.MaxECDataCount())
cmd.Printf(format, "Maximum count of parity shards", netInfo.MaxECParityCount())
cmd.Printf(format, "Withdrawal fee", netInfo.WithdrawalFee())
cmd.Printf(format, "Homomorphic hashing disabled", netInfo.HomomorphicHashingDisabled())
cmd.Printf(format, "Maintenance mode allowed", netInfo.MaintenanceModeAllowed())

View file

@ -99,10 +99,6 @@ func getObject(cmd *cobra.Command, _ []string) {
return
}
if ok := printECInfoErr(cmd, err); ok {
return
}
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
}

View file

@ -70,10 +70,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
return
}
if ok := printECInfoErr(cmd, err); ok {
return
}
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
}
@ -175,15 +171,6 @@ func printHeader(cmd *cobra.Command, obj *objectSDK.Object) error {
cmd.Printf(" signature: %s\n", hex.EncodeToString(sigV2.GetSign()))
}
if ecHeader := obj.ECHeader(); ecHeader != nil {
cmd.Print("EC header:\n")
cmd.Printf(" parent object ID: %s\n", ecHeader.Parent().EncodeToString())
cmd.Printf(" index: %d\n", ecHeader.Index())
cmd.Printf(" total: %d\n", ecHeader.Total())
cmd.Printf(" header length: %d\n", ecHeader.HeaderLength())
}
return printSplitHeader(cmd, obj)
}

View file

@ -1,22 +1,19 @@
package object
import (
"bytes"
"cmp"
"context"
"crypto/ecdsa"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"slices"
"strconv"
"sync"
"text/tabwriter"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@ -30,49 +27,21 @@ import (
)
const (
verifyPresenceAllFlag = "verify-presence-all"
preferInternalAddressesFlag = "prefer-internal-addresses"
verifyPresenceAllFlag = "verify-presence-all"
)
var (
errNoAvailableEndpoint = errors.New("failed to create client: no available endpoint")
errMalformedComplexObject = errors.New("object consists of EC and non EC parts")
)
var errNoAvailableEndpoint = errors.New("failed to create client: no available endpoint")
type phyObject struct {
containerID cid.ID
objectID oid.ID
storedOnAllContainerNodes bool
ecHeader *ecHeader
type objectNodesInfo struct {
containerID cid.ID
objectID oid.ID
relatedObjectIDs []oid.ID
isLockOrTombstone bool
}
type ecHeader struct {
index uint32
parent oid.ID
}
type objectPlacement struct {
requiredNodes []netmapSDK.NodeInfo
confirmedNodes []netmapSDK.NodeInfo
}
type objectNodesResult struct {
errors []error
placements map[oid.ID]objectPlacement
}
type ObjNodesDataObject struct {
ObjectID string `json:"object_id"`
RequiredNodes []string `json:"required_nodes,omitempty"`
ConfirmedNodes []string `json:"confirmed_nodes,omitempty"`
ECParentObjectID *string `json:"ec_parent_object_id,omitempty"`
ECIndex *uint32 `json:"ec_index,omitempty"`
}
type objNodesResultJSON struct {
ObjectID string `json:"object_id"`
DataObjects []ObjNodesDataObject `json:"data_objects,omitempty"`
Errors []string `json:"errors,omitempty"`
type boolError struct {
value bool
err error
}
var objectNodesCmd = &cobra.Command{
@ -80,7 +49,7 @@ var objectNodesCmd = &cobra.Command{
Short: "List of nodes where the object is stored",
Long: `List of nodes where the object should be stored and where it is actually stored.
Lock objects must exist on all nodes of the container.
For complex and EC objects, a node is considered to store an object if the node stores at least one part of the complex object or one chunk of the EC object.
For complex objects, a node is considered to store an object if the node stores at least one part of the complex object.
By default, the actual storage of the object is checked only on the nodes that should store the object. To check all nodes, use the flag --verify-presence-all.`,
Run: objectNodes,
}
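
The rule described in the help text above (lock and tombstone objects must sit on every container node, regular objects only on the first N nodes of each replica vector) can be illustrated with a minimal sketch; the types and node names below are stand-ins, not the SDK ones:

package main

import "fmt"

// requiredNodes returns the nodes that must hold the object for one replica
// vector: lock/tombstone objects go to every container node, regular objects
// only to the first `replicas` nodes of the vector.
func requiredNodes(vector []string, replicas int, lockOrTombstone bool) []string {
	if lockOrTombstone || replicas > len(vector) {
		return vector
	}
	return vector[:replicas]
}

func main() {
	vector := []string{"node-a", "node-b", "node-c", "node-d"}
	fmt.Println(requiredNodes(vector, 2, false)) // [node-a node-b]
	fmt.Println(requiredNodes(vector, 2, true))  // [node-a node-b node-c node-d]
}
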
@ -96,9 +65,7 @@ func initObjectNodesCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectGetCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.Bool(verifyPresenceAllFlag, false, "Verify the actual presence of the object on all netmap nodes.")
flags.Bool(commonflags.JSON, false, "Print information about the object placement as json.")
flags.Bool(preferInternalAddressesFlag, false, "Use internal addresses first to get object info.")
flags.Bool("verify-presence-all", false, "Verify the actual presence of the object on all netmap nodes")
}
func objectNodes(cmd *cobra.Command, _ []string) {
@ -109,18 +76,18 @@ func objectNodes(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
objects := getPhyObjects(cmd, cnrID, objID, cli, pk)
objectInfo := getObjectInfo(cmd, cnrID, objID, cli, pk)
placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli)
result := getRequiredPlacement(cmd, objects, placementPolicy, netmap)
requiredPlacement := getRequiredPlacement(cmd, objectInfo, placementPolicy, netmap)
getActualPlacement(cmd, netmap, pk, objects, result)
actualPlacement := getActualPlacement(cmd, netmap, requiredPlacement, pk, objectInfo)
printPlacement(cmd, objID, objects, result)
printPlacement(cmd, netmap, requiredPlacement, actualPlacement)
}
func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) []phyObject {
func getObjectInfo(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) *objectNodesInfo {
var addrObj oid.Address
addrObj.SetContainer(cnrID)
addrObj.SetObject(objID)
@ -135,137 +102,44 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
res, err := internalclient.HeadObject(cmd.Context(), prmHead)
if err == nil {
obj := phyObject{
containerID: cnrID,
objectID: objID,
storedOnAllContainerNodes: res.Header().Type() == objectSDK.TypeLock ||
res.Header().Type() == objectSDK.TypeTombstone ||
len(res.Header().Children()) > 0,
return &objectNodesInfo{
containerID: cnrID,
objectID: objID,
isLockOrTombstone: res.Header().Type() == objectSDK.TypeLock || res.Header().Type() == objectSDK.TypeTombstone,
}
if res.Header().ECHeader() != nil {
obj.ecHeader = &ecHeader{
index: res.Header().ECHeader().Index(),
parent: res.Header().ECHeader().Parent(),
}
}
return []phyObject{obj}
}
var errSplitInfo *objectSDK.SplitInfoError
if errors.As(err, &errSplitInfo) {
return getComplexObjectParts(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
if !errors.As(err, &errSplitInfo) {
commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err)
return nil
}
var ecInfoError *objectSDK.ECInfoError
if errors.As(err, &ecInfoError) {
return getECObjectChunks(cmd, cnrID, objID, ecInfoError)
}
commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err)
return nil
}
func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject {
members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead)
}
func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID {
splitInfo := errSplitInfo.SplitInfo()
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok {
return members
return &objectNodesInfo{
containerID: cnrID,
objectID: objID,
relatedObjectIDs: members,
}
}
if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok {
return members
}
return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
}
func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject {
result := make([]phyObject, 0, len(members))
var hasNonEC, hasEC bool
var resultGuard sync.Mutex
if len(members) == 0 {
return result
}
prmHead.SetRawFlag(true) // to get an error instead of whole object
eg, egCtx := errgroup.WithContext(cmd.Context())
for idx := range len(members) {
partObjID := members[idx]
eg.Go(func() error {
partHeadPrm := prmHead
var partAddr oid.Address
partAddr.SetContainer(cnrID)
partAddr.SetObject(partObjID)
partHeadPrm.SetAddress(partAddr)
obj, err := internalclient.HeadObject(egCtx, partHeadPrm)
if err != nil {
var ecInfoError *objectSDK.ECInfoError
if errors.As(err, &ecInfoError) {
resultGuard.Lock()
defer resultGuard.Unlock()
result = append(result, getECObjectChunks(cmd, cnrID, partObjID, ecInfoError)...)
hasEC = true
return nil
}
return err
}
if obj.Header().Type() != objectSDK.TypeRegular {
commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", fmt.Errorf("object '%s' with type '%s' is not supported as part of complex object", partAddr, obj.Header().Type()))
}
if len(obj.Header().Children()) > 0 {
// linking object is not data object, so skip it
return nil
}
resultGuard.Lock()
defer resultGuard.Unlock()
result = append(result, phyObject{
containerID: cnrID,
objectID: partObjID,
})
hasNonEC = true
return nil
})
}
commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", eg.Wait())
if hasEC && hasNonEC {
commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", errMalformedComplexObject)
}
return result
}
func getECObjectChunks(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, errECInfo *objectSDK.ECInfoError) []phyObject {
ecInfo := errECInfo.ECInfo()
result := make([]phyObject, 0, len(ecInfo.Chunks))
for _, ch := range ecInfo.Chunks {
var chID oid.ID
err := chID.ReadFromV2(ch.ID)
if err != nil {
commonCmd.ExitOnErr(cmd, "failed to read EC chunk ID %w", err)
return nil
return &objectNodesInfo{
containerID: cnrID,
objectID: objID,
relatedObjectIDs: members,
}
result = append(result, phyObject{
containerID: cnrID,
objectID: chID,
ecHeader: &ecHeader{
index: ch.Index,
parent: objID,
},
})
}
return result
members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
return &objectNodesInfo{
containerID: cnrID,
objectID: objID,
relatedObjectIDs: members,
}
}
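
The branching above distinguishes object kinds purely by the typed error returned from a raw HEAD: success means a singular object, a SplitInfoError marks a complex (split) object, and an ECInfoError marks an erasure-coded one. A self-contained sketch of that errors.As dispatch, with stand-in error types instead of the objectSDK ones:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for objectSDK.SplitInfoError and objectSDK.ECInfoError.
type splitInfoError struct{ members []string }

func (e *splitInfoError) Error() string { return "object is split" }

type ecInfoError struct{ chunks int }

func (e *ecInfoError) Error() string { return "object is erasure-coded" }

// classify mirrors the dispatch: a nil error means a singular object,
// otherwise the typed error says whether the object is complex or EC.
func classify(err error) string {
	var split *splitInfoError
	var ec *ecInfoError
	switch {
	case err == nil:
		return "singular object"
	case errors.As(err, &split):
		return fmt.Sprintf("complex object with %d known parts", len(split.members))
	case errors.As(err, &ec):
		return fmt.Sprintf("EC object with %d chunks", ec.chunks)
	default:
		return "head failed: " + err.Error()
	}
}

func main() {
	fmt.Println(classify(nil))
	fmt.Println(classify(&splitInfoError{members: []string{"part-1", "part-2"}}))
	fmt.Println(classify(&ecInfoError{chunks: 3}))
}
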
func getPlacementPolicyAndNetmap(cmd *cobra.Command, cnrID cid.ID, cli *client.Client) (placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) {
@ -310,111 +184,88 @@ func getNetMap(ctx context.Context, cli *client.Client) (*netmapSDK.NetMap, erro
return &nm, nil
}
func getRequiredPlacement(cmd *cobra.Command, objects []phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) *objectNodesResult {
if policy.IsECPlacement(placementPolicy) {
return getECRequiredPlacement(cmd, objects, placementPolicy, netmap)
}
return getReplicaRequiredPlacement(cmd, objects, placementPolicy, netmap)
}
func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) *objectNodesResult {
result := &objectNodesResult{
placements: make(map[oid.ID]objectPlacement),
}
func getRequiredPlacement(cmd *cobra.Command, objInfo *objectNodesInfo, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) map[uint64]netmapSDK.NodeInfo {
nodes := make(map[uint64]netmapSDK.NodeInfo)
placementBuilder := placement.NewNetworkMapBuilder(netmap)
for _, object := range objects {
placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err)
for repIdx, rep := range placement {
numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects()
var nodeIdx uint32
for _, n := range rep {
if !object.storedOnAllContainerNodes && nodeIdx == numOfReplicas {
break
}
op := result.placements[object.objectID]
op.requiredNodes = append(op.requiredNodes, n)
result.placements[object.objectID] = op
nodeIdx++
}
}
}
return result
}
func getECRequiredPlacement(cmd *cobra.Command, objects []phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap) *objectNodesResult {
result := &objectNodesResult{
placements: make(map[oid.ID]objectPlacement),
}
for _, object := range objects {
getECRequiredPlacementInternal(cmd, object, placementPolicy, netmap, result)
}
return result
}
func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placementPolicy netmapSDK.PlacementPolicy, netmap *netmapSDK.NetMap, result *objectNodesResult) {
placementObjectID := object.objectID
if object.ecHeader != nil {
placementObjectID = object.ecHeader.parent
}
placementBuilder := placement.NewNetworkMapBuilder(netmap)
placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy)
placement, err := placementBuilder.BuildPlacement(objInfo.containerID, &objInfo.objectID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err)
for _, vector := range placement {
if object.storedOnAllContainerNodes {
for _, node := range vector {
op := result.placements[object.objectID]
op.requiredNodes = append(op.requiredNodes, node)
result.placements[object.objectID] = op
for repIdx, rep := range placement {
numOfReplicas := placementPolicy.ReplicaNumberByIndex(repIdx)
var nodeIdx uint32
for _, n := range rep {
if !objInfo.isLockOrTombstone && nodeIdx == numOfReplicas { // lock and tombstone objects should be on all container nodes
break
}
continue
}
if object.ecHeader != nil {
chunkIdx := int(object.ecHeader.index)
nodeIdx := chunkIdx % len(vector)
node := vector[nodeIdx]
op := result.placements[object.objectID]
op.requiredNodes = append(op.requiredNodes, node)
result.placements[object.objectID] = op
nodes[n.Hash()] = n
nodeIdx++
}
}
for _, relatedObjID := range objInfo.relatedObjectIDs {
placement, err = placementBuilder.BuildPlacement(objInfo.containerID, &relatedObjID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement for related object: %w", err)
for _, rep := range placement {
for _, n := range rep {
nodes[n.Hash()] = n
}
}
}
return nodes
}
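
One side of this hunk pins each EC chunk to a node by taking the chunk index modulo the length of the placement vector built for the parent object. A standalone sketch of that mapping (node names are illustrative):

package main

import "fmt"

// nodeForChunk places EC chunk i on the placement vector the same way the
// command does: chunk index modulo vector length.
func nodeForChunk(vector []string, chunkIndex int) string {
	return vector[chunkIndex%len(vector)]
}

func main() {
	vector := []string{"node-a", "node-b", "node-c"}
	for i := 0; i < 5; i++ {
		fmt.Printf("chunk %d -> %s\n", i, nodeForChunk(vector, i))
	}
}
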
func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) {
func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPlacement map[uint64]netmapSDK.NodeInfo,
pk *ecdsa.PrivateKey, objInfo *objectNodesInfo,
) map[uint64]boolError {
result := make(map[uint64]boolError)
resultMtx := &sync.Mutex{}
candidates := getNodesToCheckObjectExistance(cmd, netmap, result)
var candidates []netmapSDK.NodeInfo
checkAllNodes, _ := cmd.Flags().GetBool(verifyPresenceAllFlag)
if checkAllNodes {
candidates = netmap.Nodes()
} else {
for _, n := range requiredPlacement {
candidates = append(candidates, n)
}
}
eg, egCtx := errgroup.WithContext(cmd.Context())
for _, cand := range candidates {
cand := cand
eg.Go(func() error {
cli, err := createClient(egCtx, cmd, cand, pk)
if err != nil {
resultMtx.Lock()
defer resultMtx.Unlock()
result.errors = append(result.errors, fmt.Errorf("failed to connect to node %s: %w", hex.EncodeToString(cand.PublicKey()), err))
result[cand.Hash()] = boolError{err: err}
return nil
}
for _, object := range objects {
eg.Go(func() error {
var v boolError
v.value, v.err = isObjectStoredOnNode(egCtx, cmd, objInfo.containerID, objInfo.objectID, cli, pk)
resultMtx.Lock()
defer resultMtx.Unlock()
if prev, exists := result[cand.Hash()]; exists && (prev.err != nil || prev.value) {
return nil
}
result[cand.Hash()] = v
return nil
})
for _, rObjID := range objInfo.relatedObjectIDs {
rObjID := rObjID
eg.Go(func() error {
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
var v boolError
v.value, v.err = isObjectStoredOnNode(egCtx, cmd, objInfo.containerID, rObjID, cli, pk)
resultMtx.Lock()
defer resultMtx.Unlock()
if err == nil && stored {
op := result.placements[object.objectID]
op.confirmedNodes = append(op.confirmedNodes, cand)
result.placements[object.objectID] = op
}
if err != nil {
result.errors = append(result.errors, fmt.Errorf("failed to check object %s existence on node %s: %w", object.objectID.EncodeToString(), hex.EncodeToString(cand.PublicKey()), err))
if prev, exists := result[cand.Hash()]; exists && (prev.err != nil || prev.value) {
return nil
}
result[cand.Hash()] = v
return nil
})
}
@ -423,43 +274,17 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
}
commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait())
}
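
The presence check fans out one HEAD per candidate node (and per related object) through an errgroup and merges results into a map under a mutex, so that a node already confirmed or errored is not overwritten by a later negative result. A reduced sketch of that pattern, with the network call replaced by a stub:

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

type checkResult struct {
	stored bool
	err    error
}

func main() {
	nodes := []string{"node-a", "node-b", "node-c"}
	objects := []string{"obj-1", "obj-2"}

	// Stub standing in for the real HEAD-based presence check.
	isStored := func(_ context.Context, node, obj string) (bool, error) {
		return node == "node-b" && obj == "obj-2", nil
	}

	results := make(map[string]checkResult)
	var mtx sync.Mutex

	eg, ctx := errgroup.WithContext(context.Background())
	for _, node := range nodes {
		for _, obj := range objects {
			node, obj := node, obj
			eg.Go(func() error {
				stored, err := isStored(ctx, node, obj)
				mtx.Lock()
				defer mtx.Unlock()
				// Keep an earlier positive or errored result for this node.
				if prev, ok := results[node]; ok && (prev.err != nil || prev.stored) {
					return nil
				}
				results[node] = checkResult{stored: stored, err: err}
				return nil
			})
		}
	}
	if err := eg.Wait(); err != nil {
		panic(err)
	}
	for node, res := range results {
		fmt.Printf("%s: stored=%v err=%v\n", node, res.stored, res.err)
	}
}
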
func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo {
checkAllNodes, _ := cmd.Flags().GetBool(verifyPresenceAllFlag)
if checkAllNodes {
return netmap.Nodes()
}
var nodes []netmapSDK.NodeInfo
visited := make(map[uint64]struct{})
for _, p := range result.placements {
for _, node := range p.requiredNodes {
if _, ok := visited[node.Hash()]; !ok {
nodes = append(nodes, node)
visited[node.Hash()] = struct{}{}
}
}
}
return nodes
return result
}
func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.NodeInfo, pk *ecdsa.PrivateKey) (*client.Client, error) {
var cli *client.Client
var addresses []string
if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal {
candidate.IterateNetworkEndpoints(func(s string) bool {
addresses = append(addresses, s)
return false
})
addresses = append(addresses, candidate.ExternalAddresses()...)
} else {
addresses = append(addresses, candidate.ExternalAddresses()...)
candidate.IterateNetworkEndpoints(func(s string) bool {
addresses = append(addresses, s)
return false
})
}
candidate.IterateNetworkEndpoints(func(s string) bool {
addresses = append(addresses, s)
return false
})
addresses = append(addresses, candidate.ExternalAddresses()...)
var lastErr error
for _, address := range addresses {
var networkAddr network.Address
@ -506,109 +331,24 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
return false, err
}
func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
normilizeObjectNodesResult(objects, result)
if json, _ := cmd.Flags().GetBool(commonflags.JSON); json {
printObjectNodesAsJSON(cmd, objID, objects, result)
} else {
printObjectNodesAsText(cmd, objID, objects, result)
}
}
func normilizeObjectNodesResult(objects []phyObject, result *objectNodesResult) {
slices.SortFunc(objects, func(lhs, rhs phyObject) int {
if lhs.ecHeader == nil && rhs.ecHeader == nil {
return bytes.Compare(lhs.objectID[:], rhs.objectID[:])
}
if lhs.ecHeader == nil {
return -1
}
if rhs.ecHeader == nil {
return 1
}
if lhs.ecHeader.parent == rhs.ecHeader.parent {
return cmp.Compare(lhs.ecHeader.index, rhs.ecHeader.index)
}
return bytes.Compare(lhs.ecHeader.parent[:], rhs.ecHeader.parent[:])
})
for _, obj := range objects {
op := result.placements[obj.objectID]
slices.SortFunc(op.confirmedNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
})
slices.SortFunc(op.requiredNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
})
result.placements[obj.objectID] = op
}
}
func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))
for _, object := range objects {
fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID)
if object.ecHeader != nil {
fmt.Fprintf(cmd.OutOrStdout(), "\tEC index: %d\n", object.ecHeader.index)
fmt.Fprintf(cmd.OutOrStdout(), "\tEC parent: %s\n", object.ecHeader.parent.EncodeToString())
}
op, ok := result.placements[object.objectID]
if !ok {
continue
}
if len(op.requiredNodes) > 0 {
fmt.Fprintf(cmd.OutOrStdout(), "\tRequired nodes:\n")
for _, node := range op.requiredNodes {
fmt.Fprintf(cmd.OutOrStdout(), "\t\t- %s\n", hex.EncodeToString(node.PublicKey()))
func printPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPlacement map[uint64]netmapSDK.NodeInfo, actualPlacement map[uint64]boolError) {
w := tabwriter.NewWriter(cmd.OutOrStdout(), 0, 0, 1, ' ', tabwriter.AlignRight|tabwriter.Debug)
defer func() {
commonCmd.ExitOnErr(cmd, "failed to print placement info: %w", w.Flush())
}()
fmt.Fprintln(w, "Node ID\tShould contain object\tActually contains object\t")
for _, n := range netmap.Nodes() {
nodeID := hex.EncodeToString(n.PublicKey())
_, required := requiredPlacement[n.Hash()]
actual, actualExists := actualPlacement[n.Hash()]
actualStr := ""
if actualExists {
if actual.err != nil {
actualStr = fmt.Sprintf("error: %v", actual.err)
} else {
actualStr = strconv.FormatBool(actual.value)
}
}
if len(op.confirmedNodes) > 0 {
fmt.Fprintf(cmd.OutOrStdout(), "\tConfirmed nodes:\n")
for _, node := range op.confirmedNodes {
fmt.Fprintf(cmd.OutOrStdout(), "\t\t- %s\n", hex.EncodeToString(node.PublicKey()))
}
}
}
if len(result.errors) == 0 {
return
}
fmt.Fprintf(cmd.OutOrStdout(), "Errors:\n")
for _, err := range result.errors {
fmt.Fprintf(cmd.OutOrStdout(), "\t%s\n", err.Error())
fmt.Fprintf(w, "%s\t%s\t%s\t\n", nodeID, strconv.FormatBool(required), actualStr)
}
}
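
The tabular output above uses the standard text/tabwriter with right alignment and the debug column separator; a minimal standalone example of the same writer configuration:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Same configuration as the command: right-aligned cells, debug
	// separators between columns, flushed at the end.
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.AlignRight|tabwriter.Debug)
	fmt.Fprintln(w, "Node ID\tShould contain object\tActually contains object\t")
	fmt.Fprintln(w, "02a1...\ttrue\ttrue\t")
	fmt.Fprintln(w, "03b2...\ttrue\terror: timeout\t")
	if err := w.Flush(); err != nil {
		panic(err)
	}
}
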
func printObjectNodesAsJSON(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
jsonResult := &objNodesResultJSON{
ObjectID: objID.EncodeToString(),
}
for _, object := range objects {
do := ObjNodesDataObject{
ObjectID: object.objectID.EncodeToString(),
}
if object.ecHeader != nil {
do.ECIndex = &object.ecHeader.index
ecParent := object.ecHeader.parent.EncodeToString()
do.ECParentObjectID = &ecParent
}
op, ok := result.placements[object.objectID]
if !ok {
continue
}
for _, rn := range op.requiredNodes {
do.RequiredNodes = append(do.RequiredNodes, hex.EncodeToString(rn.PublicKey()))
}
for _, cn := range op.confirmedNodes {
do.ConfirmedNodes = append(do.ConfirmedNodes, hex.EncodeToString(cn.PublicKey()))
}
jsonResult.DataObjects = append(jsonResult.DataObjects, do)
}
for _, err := range result.errors {
jsonResult.Errors = append(jsonResult.Errors, err.Error())
}
b, err := json.Marshal(jsonResult)
commonCmd.ExitOnErr(cmd, "failed to marshal json: %w", err)
cmd.Println(string(b))
}


@ -1,151 +0,0 @@
package object
import (
"fmt"
"strconv"
"strings"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
const (
newAttrsFlagName = "new-attrs"
replaceAttrsFlagName = "replace-attrs"
rangeFlagName = "range"
payloadFlagName = "payload"
)
var objectPatchCmd = &cobra.Command{
Use: "patch",
Run: patch,
Short: "Patch FrostFS object",
Long: "Patch FrostFS object. Each range passed to the command requires to pass a corresponding patch payload.",
Example: `
frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <CID> --oid <OID> --new-attrs 'key1=val1,key2=val2' --replace-attrs
frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <CID> --oid <OID> --range offX:lnX --payload /path/to/payloadX --range offY:lnY --payload /path/to/payloadY
frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <CID> --oid <OID> --new-attrs 'key1=val1,key2=val2' --replace-attrs --range offX:lnX --payload /path/to/payload
`,
}
func initObjectPatchCmd() {
commonflags.Init(objectPatchCmd)
initFlagSession(objectPatchCmd, "PATCH")
flags := objectPatchCmd.Flags()
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.CIDFlag)
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
}
func patch(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeSlice(cmd)
commonCmd.ExitOnErr(cmd, "", err)
payloads := patchPayloadPaths(cmd)
if len(ranges) != len(payloads) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("the number of ranges and payloads are not equal: ranges = %d, payloads = %d", len(ranges), len(payloads)))
}
newAttrs, err := parseNewObjectAttrs(cmd)
commonCmd.ExitOnErr(cmd, "can't parse new object attributes: %w", err)
replaceAttrs, _ := cmd.Flags().GetBool(replaceAttrsFlagName)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
var prm internalclient.PatchObjectPrm
prm.SetClient(cli)
Prepare(cmd, &prm)
ReadOrOpenSession(cmd, &prm, pk, cnr, nil)
prm.SetAddress(objAddr)
prm.NewAttributes = newAttrs
prm.ReplaceAttribute = replaceAttrs
for i := range ranges {
prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
Range: ranges[i],
PayloadPath: payloads[i],
})
}
res, err := internalclient.Patch(cmd.Context(), prm)
if err != nil {
commonCmd.ExitOnErr(cmd, "can't patch the object: %w", err)
}
cmd.Println("Patched object ID: ", res.OID.EncodeToString())
}
func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
var rawAttrs []string
raw := cmd.Flag(newAttrsFlagName).Value.String()
if len(raw) != 0 {
rawAttrs = strings.Split(raw, ",")
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
for i := range rawAttrs {
k, v, found := strings.Cut(rawAttrs[i], "=")
if !found {
return nil, fmt.Errorf("invalid attribute format: %s", rawAttrs[i])
}
attrs[i].SetKey(k)
attrs[i].SetValue(v)
}
return attrs, nil
}
func getRangeSlice(cmd *cobra.Command) ([]objectSDK.Range, error) {
v, _ := cmd.Flags().GetStringSlice(rangeFlagName)
if len(v) == 0 {
return []objectSDK.Range{}, nil
}
rs := make([]objectSDK.Range, len(v))
for i := range v {
before, after, found := strings.Cut(v[i], rangeSep)
if !found {
return nil, fmt.Errorf("invalid range specifier: %s", v[i])
}
offset, err := strconv.ParseUint(before, 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid '%s' range offset specifier: %w", v[i], err)
}
length, err := strconv.ParseUint(after, 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid '%s' range length specifier: %w", v[i], err)
}
rs[i].SetOffset(offset)
rs[i].SetLength(length)
}
return rs, nil
}
func patchPayloadPaths(cmd *cobra.Command) []string {
v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
return v
}
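
The patch command's helpers parse attributes of the form Key1=Value1,Key2=Value2 and ranges of the form offset:length. A self-contained sketch of the same parsing rules, using plain structs instead of the objectSDK types:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type attribute struct{ key, value string }

type byteRange struct{ offset, length uint64 }

// parseAttrs splits "k1=v1,k2=v2" into key/value pairs.
func parseAttrs(raw string) ([]attribute, error) {
	if raw == "" {
		return nil, nil
	}
	parts := strings.Split(raw, ",")
	attrs := make([]attribute, 0, len(parts))
	for _, p := range parts {
		k, v, found := strings.Cut(p, "=")
		if !found {
			return nil, fmt.Errorf("invalid attribute format: %s", p)
		}
		attrs = append(attrs, attribute{key: k, value: v})
	}
	return attrs, nil
}

// parseRange splits "offset:length" into two unsigned integers.
func parseRange(s string) (byteRange, error) {
	before, after, found := strings.Cut(s, ":")
	if !found {
		return byteRange{}, fmt.Errorf("invalid range specifier: %s", s)
	}
	offset, err := strconv.ParseUint(before, 10, 64)
	if err != nil {
		return byteRange{}, fmt.Errorf("invalid offset in %q: %w", s, err)
	}
	length, err := strconv.ParseUint(after, 10, 64)
	if err != nil {
		return byteRange{}, fmt.Errorf("invalid length in %q: %w", s, err)
	}
	return byteRange{offset: offset, length: length}, nil
}

func main() {
	attrs, _ := parseAttrs("key1=val1,key2=val2")
	r, _ := parseRange("128:4096")
	fmt.Println(attrs, r)
}
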


@ -146,54 +146,6 @@ func marshalSplitInfo(cmd *cobra.Command, info *objectSDK.SplitInfo) ([]byte, er
}
}
func printECInfoErr(cmd *cobra.Command, err error) bool {
var errECInfo *objectSDK.ECInfoError
ok := errors.As(err, &errECInfo)
if ok {
toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
toProto, _ := cmd.Flags().GetBool("proto")
if !(toJSON || toProto) {
cmd.PrintErrln("Object is erasure-encoded, ec information received.")
}
printECInfo(cmd, errECInfo.ECInfo())
}
return ok
}
func printECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) {
bs, err := marshalECInfo(cmd, info)
commonCmd.ExitOnErr(cmd, "can't marshal split info: %w", err)
cmd.Println(string(bs))
}
func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
toProto, _ := cmd.Flags().GetBool("proto")
switch {
case toJSON && toProto:
return nil, errors.New("'--json' and '--proto' flags are mutually exclusive")
case toJSON:
return info.MarshalJSON()
case toProto:
return info.Marshal()
default:
b := bytes.NewBuffer(nil)
b.WriteString("Total chunks: " + strconv.Itoa(int(info.Chunks[0].Total)))
for _, chunk := range info.Chunks {
var id oid.ID
if err := id.Decode(chunk.ID.GetValue()); err != nil {
return nil, fmt.Errorf("unable to decode chunk id: %w", err)
}
b.WriteString("\n Index: " + strconv.Itoa(int(chunk.Index)) + " ID: " + id.String())
}
return b.Bytes(), nil
}
}
func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
v := cmd.Flag("range").Value.String()
if len(v) == 0 {


@ -29,7 +29,6 @@ func init() {
objectRangeCmd,
objectLockCmd,
objectNodesCmd,
objectPatchCmd,
}
Cmd.AddCommand(objectChildCommands...)
@ -40,7 +39,6 @@ func init() {
}
initObjectPutCmd()
initObjectPatchCmd()
initObjectDeleteCmd()
initObjectGetCmd()
initObjectSearchCmd()


@ -306,8 +306,6 @@ func finalizeSession(cmd *cobra.Command, dst SessionPrm, tok *session.Object, ke
case *internal.PutObjectPrm:
common.PrintVerbose(cmd, "Binding session to object PUT...")
tok.ForVerb(session.VerbObjectPut)
case *internal.PatchObjectPrm:
tok.ForVerb(session.VerbObjectPatch)
case *internal.DeleteObjectPrm:
common.PrintVerbose(cmd, "Binding session to object DELETE...")
tok.ForVerb(session.VerbObjectDelete)
@ -356,38 +354,31 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID,
Prepare(cmd, &prmHead)
o, err := internal.HeadObject(cmd.Context(), prmHead)
_, err := internal.HeadObject(cmd.Context(), prmHead)
var errSplit *objectSDK.SplitInfoError
var errEC *objectSDK.ECInfoError
switch {
default:
commonCmd.ExitOnErr(cmd, "failed to get raw object header: %w", err)
case err == nil:
common.PrintVerbose(cmd, "Raw header received - object is singular.")
if ech := o.Header().ECHeader(); ech != nil {
commonCmd.ExitOnErr(cmd, "Lock EC chunk failed: %w", errors.ErrUnsupported)
}
return nil
case errors.As(err, &errSplit):
common.PrintVerbose(cmd, "Split information received - object is virtual.")
splitInfo := errSplit.SplitInfo()
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnr); ok {
return members
}
if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnr); ok {
return members
}
return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnr, obj)
case errors.As(err, &errEC):
common.PrintVerbose(cmd, "Object is erasure-coded.")
return nil
}
return nil
splitInfo := errSplit.SplitInfo()
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnr); ok {
return members
}
if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnr); ok {
return members
}
return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnr, obj)
}
func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) {
@ -411,6 +402,7 @@ func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.
common.PrintVerbose(cmd, "Received split members from the linking object: %v", children)
// include linking object
return append(children, idLinking), true
}


@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
accountingCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/acl"
apemanager "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/ape_manager"
bearerCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/bearer"
containerCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/container"
controlCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/control"
@ -82,7 +81,6 @@ func init() {
rootCmd.Flags().Bool("version", false, "Application version and FrostFS API compatibility")
rootCmd.AddCommand(acl.Cmd)
rootCmd.AddCommand(apemanager.Cmd)
rootCmd.AddCommand(bearerCli.Cmd)
rootCmd.AddCommand(sessionCli.Cmd)
rootCmd.AddCommand(accountingCli.Cmd)


@ -47,10 +47,9 @@ func add(cmd *cobra.Command, _ []string) {
meta, err := parseMeta(cmd)
commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err)
ctx, cancel := contextWithTimeout(cmd)
defer cancel()
ctx := cmd.Context()
cli, err := _client()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
@ -73,18 +72,18 @@ func add(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "signing message: %w", tree.SignMessage(req, pk))
resp, err := cli.Add(ctx, req)
commonCmd.ExitOnErr(cmd, "failed to call add: %w", err)
commonCmd.ExitOnErr(cmd, "failed to cal add: %w", err)
cmd.Println("Node ID: ", resp.GetBody().GetNodeId())
}
func parseMeta(cmd *cobra.Command) ([]tree.KeyValue, error) {
func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) {
raws, _ := cmd.Flags().GetStringSlice(metaFlagKey)
if len(raws) == 0 {
return nil, nil
}
pairs := make([]tree.KeyValue, 0, len(raws))
pairs := make([]*tree.KeyValue, 0, len(raws))
for i := range raws {
k, v, found := strings.Cut(raws[i], "=")
if !found {
@ -95,7 +94,7 @@ func parseMeta(cmd *cobra.Command) ([]tree.KeyValue, error) {
pair.Key = k
pair.Value = []byte(v)
pairs = append(pairs, pair)
pairs = append(pairs, &pair)
}
return pairs, nil

View file

@ -50,10 +50,9 @@ func addByPath(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
ctx, cancel := contextWithTimeout(cmd)
defer cancel()
ctx := cmd.Context()
cli, err := _client()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)


@ -3,14 +3,13 @@ package tree
import (
"context"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
@ -18,7 +17,7 @@ import (
// _client returns grpc Tree service client. Should be removed
// after making Tree API public.
func _client() (tree.TreeServiceClient, error) {
func _client(ctx context.Context) (tree.TreeServiceClient, error) {
var netAddr network.Address
err := netAddr.FromString(viper.GetString(commonflags.RPC))
if err != nil {
@ -26,6 +25,7 @@ func _client() (tree.TreeServiceClient, error) {
}
opts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithChainUnaryInterceptor(
metrics.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
@ -40,14 +40,12 @@ func _client() (tree.TreeServiceClient, error) {
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
// a default connection establishing timeout
const defaultClientConnectTimeout = time.Second * 2
ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...)
cancel()
return tree.NewTreeServiceClient(cc), err
}
func contextWithTimeout(cmd *cobra.Command) (context.Context, context.CancelFunc) {
if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 {
common.PrintVerbose(cmd, "Set request timeout to %s.", timeout)
return context.WithTimeout(cmd.Context(), timeout)
}
return context.WithTimeout(cmd.Context(), commonflags.TimeoutDefault)
}
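
The client constructor on one side of this hunk switches from the lazy grpc.NewClient to a blocking grpc.DialContext bounded by a short connect timeout. A reduced sketch of that dial pattern (the endpoint is a placeholder):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialWithTimeout(ctx context.Context, endpoint string) (*grpc.ClientConn, error) {
	// Bound connection establishment instead of dialing lazily.
	const defaultClientConnectTimeout = 2 * time.Second
	ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
	defer cancel()

	return grpc.DialContext(ctx, endpoint,
		grpc.WithBlock(), // fail fast if the endpoint is unreachable
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
}

func main() {
	cc, err := dialWithTimeout(context.Background(), "127.0.0.1:8080")
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
	log.Println("connected:", cc.Target())
}
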


@ -50,10 +50,9 @@ func getByPath(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
ctx, cancel := contextWithTimeout(cmd)
defer cancel()
ctx := cmd.Context()
cli, err := _client()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)

View file

@ -44,10 +44,9 @@ func getOpLog(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
ctx, cancel := contextWithTimeout(cmd)
defer cancel()
ctx := cmd.Context()
cli, err := _client()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)

View file

@ -26,10 +26,9 @@ func initHealthcheckCmd() {
func healthcheck(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
ctx, cancel := contextWithTimeout(cmd)
defer cancel()
ctx := cmd.Context()
cli, err := _client()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
req := &tree.HealthcheckRequest{

Some files were not shown because too many files have changed in this diff.