Compare commits
2 commits
master
...
fix/ec_hea
Author | SHA1 | Date | |
---|---|---|---|
308da7cb01 | |||
37b83c0856 |
416 changed files with 6304 additions and 10459 deletions
|
@ -1,4 +1,4 @@
|
||||||
FROM golang:1.22 AS builder
|
FROM golang:1.22 as builder
|
||||||
ARG BUILD=now
|
ARG BUILD=now
|
||||||
ARG VERSION=dev
|
ARG VERSION=dev
|
||||||
ARG REPO=repository
|
ARG REPO=repository
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
FROM golang:1.22 AS builder
|
FROM golang:1.22 as builder
|
||||||
ARG BUILD=now
|
ARG BUILD=now
|
||||||
ARG VERSION=dev
|
ARG VERSION=dev
|
||||||
ARG REPO=repository
|
ARG REPO=repository
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
FROM golang:1.22 AS builder
|
FROM golang:1.22 as builder
|
||||||
ARG BUILD=now
|
ARG BUILD=now
|
||||||
ARG VERSION=dev
|
ARG VERSION=dev
|
||||||
ARG REPO=repository
|
ARG REPO=repository
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
FROM golang:1.22 AS builder
|
FROM golang:1.22 as builder
|
||||||
ARG BUILD=now
|
ARG BUILD=now
|
||||||
ARG VERSION=dev
|
ARG VERSION=dev
|
||||||
ARG REPO=repository
|
ARG REPO=repository
|
||||||
|
|
|
@ -8,7 +8,7 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
go_versions: [ '1.22', '1.23' ]
|
go_versions: [ '1.21', '1.22' ]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|
|
@ -16,7 +16,7 @@ jobs:
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v3
|
||||||
with:
|
with:
|
||||||
go-version: 1.23
|
go-version: 1.22
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
run: |
|
run: |
|
||||||
apt update
|
apt update
|
||||||
|
|
|
@ -11,7 +11,7 @@ jobs:
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v3
|
||||||
with:
|
with:
|
||||||
go-version: '1.23'
|
go-version: '1.22'
|
||||||
cache: true
|
cache: true
|
||||||
|
|
||||||
- name: Install linters
|
- name: Install linters
|
||||||
|
@ -25,7 +25,7 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
go_versions: [ '1.22', '1.23' ]
|
go_versions: [ '1.21', '1.22' ]
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
@ -48,7 +48,7 @@ jobs:
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v3
|
||||||
with:
|
with:
|
||||||
go-version: '1.22'
|
go-version: '1.21'
|
||||||
cache: true
|
cache: true
|
||||||
|
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
|
@ -63,7 +63,7 @@ jobs:
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v3
|
||||||
with:
|
with:
|
||||||
go-version: '1.23'
|
go-version: '1.22'
|
||||||
cache: true
|
cache: true
|
||||||
|
|
||||||
- name: Install staticcheck
|
- name: Install staticcheck
|
||||||
|
@ -81,7 +81,7 @@ jobs:
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v3
|
||||||
with:
|
with:
|
||||||
go-version: '1.22'
|
go-version: '1.21'
|
||||||
cache: true
|
cache: true
|
||||||
|
|
||||||
- name: Install gopls
|
- name: Install gopls
|
||||||
|
@ -89,23 +89,3 @@ jobs:
|
||||||
|
|
||||||
- name: Run gopls
|
- name: Run gopls
|
||||||
run: make gopls-run
|
run: make gopls-run
|
||||||
|
|
||||||
fumpt:
|
|
||||||
name: Run gofumpt
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '1.23'
|
|
||||||
cache: true
|
|
||||||
|
|
||||||
- name: Install gofumpt
|
|
||||||
run: make fumpt-install
|
|
||||||
|
|
||||||
- name: Run gofumpt
|
|
||||||
run: |
|
|
||||||
make fumpt
|
|
||||||
git diff --exit-code --quiet
|
|
||||||
|
|
|
@ -13,7 +13,7 @@ jobs:
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v3
|
||||||
with:
|
with:
|
||||||
go-version: '1.23'
|
go-version: '1.22'
|
||||||
|
|
||||||
- name: Install govulncheck
|
- name: Install govulncheck
|
||||||
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
|
|
@ -12,8 +12,7 @@ run:
|
||||||
# output configuration options
|
# output configuration options
|
||||||
output:
|
output:
|
||||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||||
formats:
|
format: tab
|
||||||
- format: tab
|
|
||||||
|
|
||||||
# all available settings of specific linters
|
# all available settings of specific linters
|
||||||
linters-settings:
|
linters-settings:
|
||||||
|
@ -67,7 +66,7 @@ linters:
|
||||||
- bidichk
|
- bidichk
|
||||||
- durationcheck
|
- durationcheck
|
||||||
- exhaustive
|
- exhaustive
|
||||||
- copyloopvar
|
- exportloopref
|
||||||
- gofmt
|
- gofmt
|
||||||
- goimports
|
- goimports
|
||||||
- misspell
|
- misspell
|
||||||
|
|
33
Makefile
33
Makefile
|
@ -4,19 +4,20 @@ SHELL = bash
|
||||||
REPO ?= $(shell go list -m)
|
REPO ?= $(shell go list -m)
|
||||||
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
|
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
|
||||||
|
|
||||||
HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
|
HUB_IMAGE ?= truecloudlab/frostfs
|
||||||
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
|
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
|
||||||
|
|
||||||
GO_VERSION ?= 1.22
|
GO_VERSION ?= 1.22
|
||||||
LINT_VERSION ?= 1.60.3
|
LINT_VERSION ?= 1.56.1
|
||||||
TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
|
TRUECLOUDLAB_LINT_VERSION ?= 0.0.5
|
||||||
PROTOC_VERSION ?= 25.0
|
PROTOC_VERSION ?= 25.0
|
||||||
|
PROTOC_GEN_GO_VERSION ?= $(shell go list -f '{{.Version}}' -m google.golang.org/protobuf)
|
||||||
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
|
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
|
||||||
PROTOC_OS_VERSION=osx-x86_64
|
PROTOC_OS_VERSION=osx-x86_64
|
||||||
ifeq ($(shell uname), Linux)
|
ifeq ($(shell uname), Linux)
|
||||||
PROTOC_OS_VERSION=linux-x86_64
|
PROTOC_OS_VERSION=linux-x86_64
|
||||||
endif
|
endif
|
||||||
STATICCHECK_VERSION ?= 2024.1.1
|
STATICCHECK_VERSION ?= 2023.1.6
|
||||||
ARCH = amd64
|
ARCH = amd64
|
||||||
|
|
||||||
BIN = bin
|
BIN = bin
|
||||||
|
@ -38,16 +39,13 @@ LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT
|
||||||
TMP_DIR := .cache
|
TMP_DIR := .cache
|
||||||
PROTOBUF_DIR ?= $(abspath $(BIN))/protobuf
|
PROTOBUF_DIR ?= $(abspath $(BIN))/protobuf
|
||||||
PROTOC_DIR ?= $(PROTOBUF_DIR)/protoc-v$(PROTOC_VERSION)
|
PROTOC_DIR ?= $(PROTOBUF_DIR)/protoc-v$(PROTOC_VERSION)
|
||||||
|
PROTOC_GEN_GO_DIR ?= $(PROTOBUF_DIR)/protoc-gen-go-$(PROTOC_GEN_GO_VERSION)
|
||||||
PROTOGEN_FROSTFS_DIR ?= $(PROTOBUF_DIR)/protogen-$(PROTOGEN_FROSTFS_VERSION)
|
PROTOGEN_FROSTFS_DIR ?= $(PROTOBUF_DIR)/protogen-$(PROTOGEN_FROSTFS_VERSION)
|
||||||
STATICCHECK_DIR ?= $(abspath $(BIN))/staticcheck
|
STATICCHECK_DIR ?= $(abspath $(BIN))/staticcheck
|
||||||
STATICCHECK_VERSION_DIR ?= $(STATICCHECK_DIR)/$(STATICCHECK_VERSION)
|
STATICCHECK_VERSION_DIR ?= $(STATICCHECK_DIR)/$(STATICCHECK_VERSION)
|
||||||
|
|
||||||
SOURCES = $(shell find . -type f -name "*.go" -print)
|
SOURCES = $(shell find . -type f -name "*.go" -print)
|
||||||
|
|
||||||
GOFUMPT_VERSION ?= v0.7.0
|
|
||||||
GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt
|
|
||||||
GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION)
|
|
||||||
|
|
||||||
GOPLS_VERSION ?= v0.15.1
|
GOPLS_VERSION ?= v0.15.1
|
||||||
GOPLS_DIR ?= $(abspath $(BIN))/gopls
|
GOPLS_DIR ?= $(abspath $(BIN))/gopls
|
||||||
GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
|
GOPLS_VERSION_DIR ?= $(GOPLS_DIR)/$(GOPLS_VERSION)
|
||||||
|
@ -105,15 +103,17 @@ export-metrics: dep
|
||||||
|
|
||||||
# Regenerate proto files:
|
# Regenerate proto files:
|
||||||
protoc:
|
protoc:
|
||||||
@if [ ! -d "$(PROTOC_DIR)" ] || [ ! -d "$(PROTOGEN_FROSTFS_DIR)" ]; then \
|
@if [ ! -d "$(PROTOC_DIR)" ] || [ ! -d "$(PROTOC_GEN_GO_DIR)" ] || [ ! -d "$(PROTOGEN_FROSTFS_DIR)" ]; then \
|
||||||
make protoc-install; \
|
make protoc-install; \
|
||||||
fi
|
fi
|
||||||
@for f in `find . -type f -name '*.proto' -not -path './bin/*'`; do \
|
@for f in `find . -type f -name '*.proto' -not -path './bin/*'`; do \
|
||||||
echo "⇒ Processing $$f "; \
|
echo "⇒ Processing $$f "; \
|
||||||
$(PROTOC_DIR)/bin/protoc \
|
$(PROTOC_DIR)/bin/protoc \
|
||||||
--proto_path=.:$(PROTOC_DIR)/include:/usr/local/include \
|
--proto_path=.:$(PROTOC_DIR)/include:/usr/local/include \
|
||||||
|
--plugin=protoc-gen-go=$(PROTOC_GEN_GO_DIR)/protoc-gen-go \
|
||||||
--plugin=protoc-gen-go-frostfs=$(PROTOGEN_FROSTFS_DIR)/protogen \
|
--plugin=protoc-gen-go-frostfs=$(PROTOGEN_FROSTFS_DIR)/protogen \
|
||||||
--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
|
--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
|
||||||
|
--go_out=. --go_opt=paths=source_relative \
|
||||||
--go-grpc_opt=require_unimplemented_servers=false \
|
--go-grpc_opt=require_unimplemented_servers=false \
|
||||||
--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
|
--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
|
||||||
done
|
done
|
||||||
|
@ -126,6 +126,8 @@ protoc-install:
|
||||||
@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
|
@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
|
||||||
@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
|
@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
|
||||||
@rm $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip
|
@rm $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip
|
||||||
|
@echo "⇒ Installing protoc-gen-go..."
|
||||||
|
@GOBIN=$(PROTOC_GEN_GO_DIR) go install -v google.golang.org/protobuf/...@$(PROTOC_GEN_GO_VERSION)
|
||||||
@echo "⇒ Instaling protogen FrostFS plugin..."
|
@echo "⇒ Instaling protogen FrostFS plugin..."
|
||||||
@GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen@$(PROTOGEN_FROSTFS_VERSION)
|
@GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen@$(PROTOGEN_FROSTFS_VERSION)
|
||||||
|
|
||||||
|
@ -163,19 +165,10 @@ imports:
|
||||||
@echo "⇒ Processing goimports check"
|
@echo "⇒ Processing goimports check"
|
||||||
@goimports -w cmd/ pkg/ misc/
|
@goimports -w cmd/ pkg/ misc/
|
||||||
|
|
||||||
# Install gofumpt
|
|
||||||
fumpt-install:
|
|
||||||
@rm -rf $(GOFUMPT_DIR)
|
|
||||||
@mkdir $(GOFUMPT_DIR)
|
|
||||||
@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
|
|
||||||
|
|
||||||
# Run gofumpt
|
# Run gofumpt
|
||||||
fumpt:
|
fumpt:
|
||||||
@if [ ! -d "$(GOFUMPT_VERSION_DIR)" ]; then \
|
|
||||||
make fumpt-install; \
|
|
||||||
fi
|
|
||||||
@echo "⇒ Processing gofumpt check"
|
@echo "⇒ Processing gofumpt check"
|
||||||
$(GOFUMPT_VERSION_DIR)/gofumpt -l -w cmd/ pkg/ misc/
|
@gofumpt -l -w cmd/ pkg/ misc/
|
||||||
|
|
||||||
# Run Unit Test with go test
|
# Run Unit Test with go test
|
||||||
test: GOFLAGS ?= "-count=1"
|
test: GOFLAGS ?= "-count=1"
|
||||||
|
@ -197,7 +190,7 @@ lint-install:
|
||||||
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
|
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
|
||||||
@rm -rf $(TMP_DIR)/linters
|
@rm -rf $(TMP_DIR)/linters
|
||||||
@rmdir $(TMP_DIR) 2>/dev/null || true
|
@rmdir $(TMP_DIR) 2>/dev/null || true
|
||||||
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
|
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
|
||||||
|
|
||||||
# Run linters
|
# Run linters
|
||||||
lint:
|
lint:
|
||||||
|
|
17
README.md
17
README.md
|
@ -7,8 +7,9 @@
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
---
|
---
|
||||||
[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-node)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-node)
|
[![Report](https://goreportcard.com/badge/github.com/TrueCloudLab/frostfs-node)](https://goreportcard.com/report/github.com/TrueCloudLab/frostfs-node)
|
||||||
![Release (latest)](https://git.frostfs.info/TrueCloudLab/frostfs-node/badges/release.svg)
|
![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/TrueCloudLab/frostfs-node?sort=semver)
|
||||||
|
![License](https://img.shields.io/github/license/TrueCloudLab/frostfs-node.svg?style=popout)
|
||||||
|
|
||||||
# Overview
|
# Overview
|
||||||
|
|
||||||
|
@ -32,8 +33,8 @@ manipulate large amounts of data without paying a prohibitive price.
|
||||||
|
|
||||||
FrostFS has a native [gRPC API](https://git.frostfs.info/TrueCloudLab/frostfs-api) and has
|
FrostFS has a native [gRPC API](https://git.frostfs.info/TrueCloudLab/frostfs-api) and has
|
||||||
protocol gateways for popular protocols such as [AWS
|
protocol gateways for popular protocols such as [AWS
|
||||||
S3](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw),
|
S3](https://github.com/TrueCloudLab/frostfs-s3-gw),
|
||||||
[HTTP](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw),
|
[HTTP](https://github.com/TrueCloudLab/frostfs-http-gw),
|
||||||
[FUSE](https://wikipedia.org/wiki/Filesystem_in_Userspace) and
|
[FUSE](https://wikipedia.org/wiki/Filesystem_in_Userspace) and
|
||||||
[sFTP](https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol) allowing
|
[sFTP](https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol) allowing
|
||||||
developers to integrate applications without rewriting their code.
|
developers to integrate applications without rewriting their code.
|
||||||
|
@ -44,11 +45,11 @@ Now, we only support GNU/Linux on amd64 CPUs with AVX/AVX2 instructions. More
|
||||||
platforms will be officially supported after release `1.0`.
|
platforms will be officially supported after release `1.0`.
|
||||||
|
|
||||||
The latest version of frostfs-node works with frostfs-contract
|
The latest version of frostfs-node works with frostfs-contract
|
||||||
[v0.19.2](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases/tag/v0.19.2).
|
[v0.16.0](https://github.com/TrueCloudLab/frostfs-contract/releases/tag/v0.16.0).
|
||||||
|
|
||||||
# Building
|
# Building
|
||||||
|
|
||||||
To make all binaries you need Go 1.22+ and `make`:
|
To make all binaries you need Go 1.21+ and `make`:
|
||||||
```
|
```
|
||||||
make all
|
make all
|
||||||
```
|
```
|
||||||
|
@ -70,7 +71,7 @@ make docker/bin/frostfs-<name> # build a specific binary
|
||||||
|
|
||||||
## Docker images
|
## Docker images
|
||||||
|
|
||||||
To make docker images suitable for use in [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env/) use:
|
To make docker images suitable for use in [frostfs-dev-env](https://github.com/TrueCloudLab/frostfs-dev-env/) use:
|
||||||
```
|
```
|
||||||
make images
|
make images
|
||||||
```
|
```
|
||||||
|
@ -124,7 +125,7 @@ the feature/topic you are going to implement.
|
||||||
|
|
||||||
# Credits
|
# Credits
|
||||||
|
|
||||||
FrostFS is maintained by [True Cloud Lab](https://git.frostfs.info/TrueCloudLab/) with the help and
|
FrostFS is maintained by [True Cloud Lab](https://github.com/TrueCloudLab/) with the help and
|
||||||
contributions from community members.
|
contributions from community members.
|
||||||
|
|
||||||
Please see [CREDITS](CREDITS.md) for details.
|
Please see [CREDITS](CREDITS.md) for details.
|
||||||
|
|
|
@ -9,8 +9,8 @@ related configuration details.
|
||||||
|
|
||||||
To follow this guide you need:
|
To follow this guide you need:
|
||||||
- latest released version of [neo-go](https://github.com/nspcc-dev/neo-go/releases) (v0.97.2 at the moment),
|
- latest released version of [neo-go](https://github.com/nspcc-dev/neo-go/releases) (v0.97.2 at the moment),
|
||||||
- latest released version of [frostfs-adm](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases) utility (v0.42.9 at the moment),
|
- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases) utility (v0.25.1 at the moment),
|
||||||
- latest released version of compiled [frostfs-contract](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases) (v0.19.2 at the moment).
|
- latest released version of compiled [frostfs-contract](https://github.com/TrueCloudLab/frostfs-contract/releases) (v0.11.0 at the moment).
|
||||||
|
|
||||||
## Step 1: Prepare network configuration
|
## Step 1: Prepare network configuration
|
||||||
|
|
||||||
|
|
|
@ -1,15 +0,0 @@
|
||||||
package metabase
|
|
||||||
|
|
||||||
import "github.com/spf13/cobra"
|
|
||||||
|
|
||||||
// RootCmd is a root command of config section.
|
|
||||||
var RootCmd = &cobra.Command{
|
|
||||||
Use: "metabase",
|
|
||||||
Short: "Section for metabase commands",
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
RootCmd.AddCommand(UpgradeCmd)
|
|
||||||
|
|
||||||
initUpgradeCommand()
|
|
||||||
}
|
|
|
@ -1,99 +0,0 @@
|
||||||
package metabase
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
|
||||||
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
|
|
||||||
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
|
|
||||||
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
pathFlag = "path"
|
|
||||||
noCompactFlag = "no-compact"
|
|
||||||
)
|
|
||||||
|
|
||||||
var errNoPathsFound = errors.New("no metabase paths found")
|
|
||||||
|
|
||||||
var path string
|
|
||||||
|
|
||||||
var UpgradeCmd = &cobra.Command{
|
|
||||||
Use: "upgrade",
|
|
||||||
Short: "Upgrade metabase to latest version",
|
|
||||||
RunE: upgrade,
|
|
||||||
}
|
|
||||||
|
|
||||||
func upgrade(cmd *cobra.Command, _ []string) error {
|
|
||||||
configFile, err := cmd.Flags().GetString(commonflags.ConfigFlag)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
configDir, err := cmd.Flags().GetString(commonflags.ConfigDirFlag)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
|
|
||||||
var paths []string
|
|
||||||
if path != "" {
|
|
||||||
paths = append(paths, path)
|
|
||||||
}
|
|
||||||
appCfg := config.New(configFile, configDir, config.EnvPrefix)
|
|
||||||
if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
|
|
||||||
paths = append(paths, sc.Metabase().Path())
|
|
||||||
return nil
|
|
||||||
}); err != nil {
|
|
||||||
return fmt.Errorf("failed to get metabase paths: %w", err)
|
|
||||||
}
|
|
||||||
if len(paths) == 0 {
|
|
||||||
return errNoPathsFound
|
|
||||||
}
|
|
||||||
cmd.Println("found", len(paths), "metabases:")
|
|
||||||
for i, path := range paths {
|
|
||||||
cmd.Println(i+1, ":", path)
|
|
||||||
}
|
|
||||||
result := make(map[string]bool)
|
|
||||||
var resultGuard sync.Mutex
|
|
||||||
eg, ctx := errgroup.WithContext(cmd.Context())
|
|
||||||
for _, path := range paths {
|
|
||||||
eg.Go(func() error {
|
|
||||||
var success bool
|
|
||||||
cmd.Println("upgrading metabase", path, "...")
|
|
||||||
if err := meta.Upgrade(ctx, path, !noCompact, func(a ...any) {
|
|
||||||
cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
|
|
||||||
}); err != nil {
|
|
||||||
cmd.Println("error: failed to upgrade metabase", path, ":", err)
|
|
||||||
} else {
|
|
||||||
success = true
|
|
||||||
cmd.Println("metabase", path, "upgraded successfully")
|
|
||||||
}
|
|
||||||
resultGuard.Lock()
|
|
||||||
result[path] = success
|
|
||||||
resultGuard.Unlock()
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if err := eg.Wait(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for mb, ok := range result {
|
|
||||||
if ok {
|
|
||||||
cmd.Println(mb, ": success")
|
|
||||||
} else {
|
|
||||||
cmd.Println(mb, ": failed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func initUpgradeCommand() {
|
|
||||||
flags := UpgradeCmd.Flags()
|
|
||||||
flags.StringVar(&path, pathFlag, "", "Path to metabase file")
|
|
||||||
flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file")
|
|
||||||
}
|
|
|
@ -38,12 +38,6 @@ var (
|
||||||
func parseTarget(cmd *cobra.Command) policyengine.Target {
|
func parseTarget(cmd *cobra.Command) policyengine.Target {
|
||||||
name, _ := cmd.Flags().GetString(targetNameFlag)
|
name, _ := cmd.Flags().GetString(targetNameFlag)
|
||||||
typ, err := parseTargetType(cmd)
|
typ, err := parseTargetType(cmd)
|
||||||
|
|
||||||
// interpret "root" namespace as empty
|
|
||||||
if typ == policyengine.Namespace && name == "root" {
|
|
||||||
name = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
commonCmd.ExitOnErr(cmd, "read target type error: %w", err)
|
commonCmd.ExitOnErr(cmd, "read target type error: %w", err)
|
||||||
|
|
||||||
return policyengine.Target{
|
return policyengine.Target{
|
||||||
|
|
|
@ -68,7 +68,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
|
||||||
|
|
||||||
if irSize != 0 {
|
if irSize != 0 {
|
||||||
bw.Reset()
|
bw.Reset()
|
||||||
for i := range irSize {
|
for i := 0; i < irSize; i++ {
|
||||||
emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly,
|
emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly,
|
||||||
helper.GetAlphabetNNSDomain(i),
|
helper.GetAlphabetNNSDomain(i),
|
||||||
int64(nns.TXT))
|
int64(nns.TXT))
|
||||||
|
@ -79,7 +79,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
|
||||||
return fmt.Errorf("can't fetch info from NNS: %w", err)
|
return fmt.Errorf("can't fetch info from NNS: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := range irSize {
|
for i := 0; i < irSize; i++ {
|
||||||
info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)}
|
info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)}
|
||||||
if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil {
|
if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil {
|
||||||
info.hash = h
|
info.hash = h
|
||||||
|
|
|
@ -73,6 +73,7 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
|
||||||
return nil, fmt.Errorf("can't fetch password: %w", err)
|
return nil, fmt.Errorf("can't fetch password: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
i := i
|
||||||
errG.Go(func() error {
|
errG.Go(func() error {
|
||||||
p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
|
p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
|
||||||
f, err := os.OpenFile(p, os.O_CREATE, 0o644)
|
f, err := os.OpenFile(p, os.O_CREATE, 0o644)
|
||||||
|
@ -106,6 +107,7 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
|
||||||
// Create consensus account with 2*N/3+1 multi-signature.
|
// Create consensus account with 2*N/3+1 multi-signature.
|
||||||
bftCount := smartcontract.GetDefaultHonestNodeCount(size)
|
bftCount := smartcontract.GetDefaultHonestNodeCount(size)
|
||||||
for i := range wallets {
|
for i := range wallets {
|
||||||
|
i := i
|
||||||
ps := pubs.Copy()
|
ps := pubs.Copy()
|
||||||
errG.Go(func() error {
|
errG.Go(func() error {
|
||||||
if err := addMultisigAccount(wallets[i], majCount, constants.CommitteeAccountName, passwords[i], ps); err != nil {
|
if err := addMultisigAccount(wallets[i], majCount, constants.CommitteeAccountName, passwords[i], ps); err != nil {
|
||||||
|
|
|
@ -224,7 +224,7 @@ func (l *LocalClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, e
|
||||||
paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}}
|
paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}}
|
||||||
} else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok {
|
} else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok {
|
||||||
paramz = make([]manifest.Parameter, nSigs)
|
paramz = make([]manifest.Parameter, nSigs)
|
||||||
for j := range nSigs {
|
for j := 0; j < nSigs; j++ {
|
||||||
paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType}
|
paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -44,7 +44,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
|
||||||
|
|
||||||
var wallets []*wallet.Wallet
|
var wallets []*wallet.Wallet
|
||||||
var letter string
|
var letter string
|
||||||
for i := range constants.MaxAlphabetNodes {
|
for i := 0; i < constants.MaxAlphabetNodes; i++ {
|
||||||
letter = innerring.GlagoliticLetter(i).String()
|
letter = innerring.GlagoliticLetter(i).String()
|
||||||
p := filepath.Join(walletDir, letter+".json")
|
p := filepath.Join(walletDir, letter+".json")
|
||||||
var w *wallet.Wallet
|
var w *wallet.Wallet
|
||||||
|
|
|
@ -113,7 +113,7 @@ func generateTestData(dir string, size int) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
var pubs []string
|
var pubs []string
|
||||||
for i := range size {
|
for i := 0; i < size; i++ {
|
||||||
p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json")
|
p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json")
|
||||||
w, err := wallet.NewWalletFromFile(p)
|
w, err := wallet.NewWalletFromFile(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -148,7 +148,7 @@ func generateTestData(dir string, size int) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func setTestCredentials(v *viper.Viper, size int) {
|
func setTestCredentials(v *viper.Viper, size int) {
|
||||||
for i := range size {
|
for i := 0; i < size; i++ {
|
||||||
v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10))
|
v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10))
|
||||||
}
|
}
|
||||||
v.Set("credentials.contract", constants.TestContractPassword)
|
v.Set("credentials.contract", constants.TestContractPassword)
|
||||||
|
|
|
@ -5,7 +5,6 @@ import (
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
|
||||||
|
@ -42,7 +41,6 @@ func init() {
|
||||||
rootCmd.AddCommand(config.RootCmd)
|
rootCmd.AddCommand(config.RootCmd)
|
||||||
rootCmd.AddCommand(morph.RootCmd)
|
rootCmd.AddCommand(morph.RootCmd)
|
||||||
rootCmd.AddCommand(storagecfg.RootCmd)
|
rootCmd.AddCommand(storagecfg.RootCmd)
|
||||||
rootCmd.AddCommand(metabase.RootCmd)
|
|
||||||
|
|
||||||
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
|
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
|
||||||
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))
|
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))
|
||||||
|
|
|
@ -2,13 +2,10 @@ package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"cmp"
|
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
|
||||||
"slices"
|
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
@ -17,6 +14,7 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||||
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||||
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
@ -191,6 +189,54 @@ func DeleteContainer(ctx context.Context, prm DeleteContainerPrm) (res DeleteCon
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EACLPrm groups parameters of EACL operation.
|
||||||
|
type EACLPrm struct {
|
||||||
|
Client *client.Client
|
||||||
|
ClientParams client.PrmContainerEACL
|
||||||
|
}
|
||||||
|
|
||||||
|
// EACLRes groups the resulting values of EACL operation.
|
||||||
|
type EACLRes struct {
|
||||||
|
cliRes *client.ResContainerEACL
|
||||||
|
}
|
||||||
|
|
||||||
|
// EACL returns requested eACL table.
|
||||||
|
func (x EACLRes) EACL() eacl.Table {
|
||||||
|
return x.cliRes.Table()
|
||||||
|
}
|
||||||
|
|
||||||
|
// EACL reads eACL table from FrostFS by container ID.
|
||||||
|
//
|
||||||
|
// Returns any error which prevented the operation from completing correctly in error return.
|
||||||
|
func EACL(ctx context.Context, prm EACLPrm) (res EACLRes, err error) {
|
||||||
|
res.cliRes, err = prm.Client.ContainerEACL(ctx, prm.ClientParams)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetEACLPrm groups parameters of SetEACL operation.
|
||||||
|
type SetEACLPrm struct {
|
||||||
|
Client *client.Client
|
||||||
|
ClientParams client.PrmContainerSetEACL
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetEACLRes groups the resulting values of SetEACL operation.
|
||||||
|
type SetEACLRes struct{}
|
||||||
|
|
||||||
|
// SetEACL requests to save an eACL table in FrostFS.
|
||||||
|
//
|
||||||
|
// Operation is asynchronous and no guaranteed even in the absence of errors.
|
||||||
|
// The required time is also not predictable.
|
||||||
|
//
|
||||||
|
// Success can be verified by reading by container identifier.
|
||||||
|
//
|
||||||
|
// Returns any error which prevented the operation from completing correctly in error return.
|
||||||
|
func SetEACL(ctx context.Context, prm SetEACLPrm) (res SetEACLRes, err error) {
|
||||||
|
_, err = prm.Client.ContainerSetEACL(ctx, prm.ClientParams)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// NetworkInfoPrm groups parameters of NetworkInfo operation.
|
// NetworkInfoPrm groups parameters of NetworkInfo operation.
|
||||||
type NetworkInfoPrm struct {
|
type NetworkInfoPrm struct {
|
||||||
Client *client.Client
|
Client *client.Client
|
||||||
|
@ -666,7 +712,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
|
||||||
|
|
||||||
for {
|
for {
|
||||||
n, ok = rdr.Read(buf)
|
n, ok = rdr.Read(buf)
|
||||||
for i := range n {
|
for i := 0; i < n; i++ {
|
||||||
list = append(list, buf[i])
|
list = append(list, buf[i])
|
||||||
}
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -846,65 +892,3 @@ func SyncContainerSettings(ctx context.Context, prm SyncContainerPrm) (*SyncCont
|
||||||
|
|
||||||
return new(SyncContainerRes), nil
|
return new(SyncContainerRes), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// PatchObjectPrm groups parameters of PatchObject operation.
|
|
||||||
type PatchObjectPrm struct {
|
|
||||||
commonObjectPrm
|
|
||||||
objectAddressPrm
|
|
||||||
|
|
||||||
NewAttributes []objectSDK.Attribute
|
|
||||||
|
|
||||||
ReplaceAttribute bool
|
|
||||||
|
|
||||||
PayloadPatches []PayloadPatch
|
|
||||||
}
|
|
||||||
|
|
||||||
type PayloadPatch struct {
|
|
||||||
Range objectSDK.Range
|
|
||||||
|
|
||||||
PayloadPath string
|
|
||||||
}
|
|
||||||
|
|
||||||
type PatchRes struct {
|
|
||||||
OID oid.ID
|
|
||||||
}
|
|
||||||
|
|
||||||
func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
|
|
||||||
patchPrm := client.PrmObjectPatch{
|
|
||||||
XHeaders: prm.xHeaders,
|
|
||||||
BearerToken: prm.bearerToken,
|
|
||||||
Session: prm.sessionToken,
|
|
||||||
Address: prm.objAddr,
|
|
||||||
}
|
|
||||||
|
|
||||||
slices.SortFunc(prm.PayloadPatches, func(a, b PayloadPatch) int {
|
|
||||||
return cmp.Compare(a.Range.GetOffset(), b.Range.GetOffset())
|
|
||||||
})
|
|
||||||
|
|
||||||
patcher, err := prm.cli.ObjectPatchInit(ctx, patchPrm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("init payload reading: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) {
|
|
||||||
for _, pp := range prm.PayloadPatches {
|
|
||||||
payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
applied := patcher.PatchPayload(ctx, &pp.Range, payloadFile)
|
|
||||||
_ = payloadFile.Close()
|
|
||||||
if !applied {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err := patcher.Close(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &PatchRes{
|
|
||||||
OID: res.ObjectID(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
|
@ -24,8 +24,6 @@ var testCmd = &cobra.Command{
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_getOrGenerate(t *testing.T) {
|
func Test_getOrGenerate(t *testing.T) {
|
||||||
t.Cleanup(viper.Reset)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
|
|
||||||
wallPath := filepath.Join(dir, "wallet.json")
|
wallPath := filepath.Join(dir, "wallet.json")
|
||||||
|
|
|
@ -139,7 +139,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for range awaitTimeout {
|
for i := 0; i < awaitTimeout; i++ {
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
|
|
||||||
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
|
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
|
||||||
|
|
|
@ -110,7 +110,7 @@ Only owner of the container has a permission to remove container.`,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for range awaitTimeout {
|
for i := 0; i < awaitTimeout; i++ {
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
|
|
||||||
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
|
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
|
||||||
|
|
68
cmd/frostfs-cli/modules/container/get_eacl.go
Normal file
68
cmd/frostfs-cli/modules/container/get_eacl.go
Normal file
|
@ -0,0 +1,68 @@
|
||||||
|
package container
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||||
|
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
var getExtendedACLCmd = &cobra.Command{
|
||||||
|
Use: "get-eacl",
|
||||||
|
Short: "Get extended ACL table of container",
|
||||||
|
Long: `Get extended ACL table of container`,
|
||||||
|
Run: func(cmd *cobra.Command, _ []string) {
|
||||||
|
id := parseContainerID(cmd)
|
||||||
|
pk := key.GetOrGenerate(cmd)
|
||||||
|
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
|
||||||
|
|
||||||
|
eaclPrm := internalclient.EACLPrm{
|
||||||
|
Client: cli,
|
||||||
|
ClientParams: client.PrmContainerEACL{
|
||||||
|
ContainerID: &id,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := internalclient.EACL(cmd.Context(), eaclPrm)
|
||||||
|
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||||
|
|
||||||
|
eaclTable := res.EACL()
|
||||||
|
|
||||||
|
if containerPathTo == "" {
|
||||||
|
cmd.Println("eACL: ")
|
||||||
|
common.PrettyPrintJSON(cmd, &eaclTable, "eACL")
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var data []byte
|
||||||
|
|
||||||
|
if containerJSON {
|
||||||
|
data, err = eaclTable.MarshalJSON()
|
||||||
|
commonCmd.ExitOnErr(cmd, "can't encode to JSON: %w", err)
|
||||||
|
} else {
|
||||||
|
data, err = eaclTable.Marshal()
|
||||||
|
commonCmd.ExitOnErr(cmd, "can't encode to binary: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.Println("dumping data to file:", containerPathTo)
|
||||||
|
|
||||||
|
err = os.WriteFile(containerPathTo, data, 0o644)
|
||||||
|
commonCmd.ExitOnErr(cmd, "could not write eACL to file: %w", err)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func initContainerGetEACLCmd() {
|
||||||
|
commonflags.Init(getExtendedACLCmd)
|
||||||
|
|
||||||
|
flags := getExtendedACLCmd.Flags()
|
||||||
|
|
||||||
|
flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||||
|
flags.StringVar(&containerPathTo, "to", "", "Path to dump encoded container (default: binary encoded)")
|
||||||
|
flags.BoolVar(&containerJSON, commonflags.JSON, false, "Encode EACL table in json format")
|
||||||
|
}
|
|
@ -1,6 +1,9 @@
|
||||||
package container
|
package container
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||||
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||||
|
@ -67,6 +70,7 @@ var listContainersCmd = &cobra.Command{
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cnrID := cnrID
|
||||||
prmGet.ClientParams.ContainerID = &cnrID
|
prmGet.ClientParams.ContainerID = &cnrID
|
||||||
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
|
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -81,8 +85,12 @@ var listContainersCmd = &cobra.Command{
|
||||||
cmd.Println(cnrID.String())
|
cmd.Println(cnrID.String())
|
||||||
|
|
||||||
if flagVarListPrintAttr {
|
if flagVarListPrintAttr {
|
||||||
cnr.IterateUserAttributes(func(key, val string) {
|
cnr.IterateAttributes(func(key, val string) {
|
||||||
cmd.Printf(" %s: %s\n", key, val)
|
if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) {
|
||||||
|
// FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
|
||||||
|
// Use dedicated method to skip system attributes.
|
||||||
|
cmd.Printf(" %s: %s\n", key, val)
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -25,6 +25,8 @@ func init() {
|
||||||
deleteContainerCmd,
|
deleteContainerCmd,
|
||||||
listContainerObjectsCmd,
|
listContainerObjectsCmd,
|
||||||
getContainerInfoCmd,
|
getContainerInfoCmd,
|
||||||
|
getExtendedACLCmd,
|
||||||
|
setExtendedACLCmd,
|
||||||
containerNodesCmd,
|
containerNodesCmd,
|
||||||
policyPlaygroundCmd,
|
policyPlaygroundCmd,
|
||||||
}
|
}
|
||||||
|
@ -36,6 +38,8 @@ func init() {
|
||||||
initContainerDeleteCmd()
|
initContainerDeleteCmd()
|
||||||
initContainerListObjectsCmd()
|
initContainerListObjectsCmd()
|
||||||
initContainerInfoCmd()
|
initContainerInfoCmd()
|
||||||
|
initContainerGetEACLCmd()
|
||||||
|
initContainerSetEACLCmd()
|
||||||
initContainerNodesCmd()
|
initContainerNodesCmd()
|
||||||
initContainerPolicyPlaygroundCmd()
|
initContainerPolicyPlaygroundCmd()
|
||||||
|
|
||||||
|
@ -49,6 +53,7 @@ func init() {
|
||||||
}{
|
}{
|
||||||
{createContainerCmd, "PUT"},
|
{createContainerCmd, "PUT"},
|
||||||
{deleteContainerCmd, "DELETE"},
|
{deleteContainerCmd, "DELETE"},
|
||||||
|
{setExtendedACLCmd, "SETEACL"},
|
||||||
} {
|
} {
|
||||||
commonflags.InitSession(el.cmd, "container "+el.verb)
|
commonflags.InitSession(el.cmd, "container "+el.verb)
|
||||||
}
|
}
|
||||||
|
|
108
cmd/frostfs-cli/modules/container/set_eacl.go
Normal file
108
cmd/frostfs-cli/modules/container/set_eacl.go
Normal file
|
@ -0,0 +1,108 @@
|
||||||
|
package container
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||||
|
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
var flagVarsSetEACL struct {
|
||||||
|
noPreCheck bool
|
||||||
|
|
||||||
|
srcPath string
|
||||||
|
}
|
||||||
|
|
||||||
|
var setExtendedACLCmd = &cobra.Command{
|
||||||
|
Use: "set-eacl",
|
||||||
|
Short: "Set new extended ACL table for container",
|
||||||
|
Long: `Set new extended ACL table for container.
|
||||||
|
Container ID in EACL table will be substituted with ID from the CLI.`,
|
||||||
|
Run: func(cmd *cobra.Command, _ []string) {
|
||||||
|
id := parseContainerID(cmd)
|
||||||
|
eaclTable := common.ReadEACL(cmd, flagVarsSetEACL.srcPath)
|
||||||
|
|
||||||
|
tok := getSession(cmd)
|
||||||
|
|
||||||
|
eaclTable.SetCID(id)
|
||||||
|
|
||||||
|
pk := key.GetOrGenerate(cmd)
|
||||||
|
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
|
||||||
|
|
||||||
|
if !flagVarsSetEACL.noPreCheck {
|
||||||
|
cmd.Println("Checking the ability to modify access rights in the container...")
|
||||||
|
|
||||||
|
extendable, err := internalclient.IsACLExtendable(cmd.Context(), cli, id)
|
||||||
|
commonCmd.ExitOnErr(cmd, "Extensibility check failure: %w", err)
|
||||||
|
|
||||||
|
if !extendable {
|
||||||
|
commonCmd.ExitOnErr(cmd, "", errors.New("container ACL is immutable"))
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.Println("ACL extension is enabled in the container, continue processing.")
|
||||||
|
}
|
||||||
|
|
||||||
|
setEACLPrm := internalclient.SetEACLPrm{
|
||||||
|
Client: cli,
|
||||||
|
ClientParams: client.PrmContainerSetEACL{
|
||||||
|
Table: eaclTable,
|
||||||
|
Session: tok,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := internalclient.SetEACL(cmd.Context(), setEACLPrm)
|
||||||
|
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||||
|
|
||||||
|
if containerAwait {
|
||||||
|
exp, err := eaclTable.Marshal()
|
||||||
|
commonCmd.ExitOnErr(cmd, "broken EACL table: %w", err)
|
||||||
|
|
||||||
|
cmd.Println("awaiting...")
|
||||||
|
|
||||||
|
getEACLPrm := internalclient.EACLPrm{
|
||||||
|
Client: cli,
|
||||||
|
ClientParams: client.PrmContainerEACL{
|
||||||
|
ContainerID: &id,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < awaitTimeout; i++ {
|
||||||
|
time.Sleep(1 * time.Second)
|
||||||
|
|
||||||
|
res, err := internalclient.EACL(cmd.Context(), getEACLPrm)
|
||||||
|
if err == nil {
|
||||||
|
// compare binary values because EACL could have been set already
|
||||||
|
table := res.EACL()
|
||||||
|
got, err := table.Marshal()
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if bytes.Equal(exp, got) {
|
||||||
|
cmd.Println("EACL has been persisted on sidechain")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
commonCmd.ExitOnErr(cmd, "", errSetEACLTimeout)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func initContainerSetEACLCmd() {
|
||||||
|
commonflags.Init(setExtendedACLCmd)
|
||||||
|
|
||||||
|
flags := setExtendedACLCmd.Flags()
|
||||||
|
flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||||
|
flags.StringVar(&flagVarsSetEACL.srcPath, "table", "", "path to file with JSON or binary encoded EACL table")
|
||||||
|
flags.BoolVar(&containerAwait, "await", false, "block execution until EACL is persisted")
|
||||||
|
flags.BoolVar(&flagVarsSetEACL.noPreCheck, "no-precheck", false, "do not pre-check the extensibility of the container ACL")
|
||||||
|
}
|
|
@ -18,8 +18,9 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain")
|
errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain")
|
||||||
errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain")
|
errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain")
|
||||||
|
errSetEACLTimeout = errors.New("timeout: EACL has not been persisted on sidechain")
|
||||||
)
|
)
|
||||||
|
|
||||||
func parseContainerID(cmd *cobra.Command) cid.ID {
|
func parseContainerID(cmd *cobra.Command) cid.ID {
|
||||||
|
|
|
@ -1,88 +0,0 @@
|
||||||
package control
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
|
||||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
|
||||||
"github.com/mr-tron/base58"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
fillPercentFlag = "fill_percent"
|
|
||||||
)
|
|
||||||
|
|
||||||
var shardsRebuildCmd = &cobra.Command{
|
|
||||||
Use: "rebuild",
|
|
||||||
Short: "Rebuild shards",
|
|
||||||
Long: "Rebuild reclaims storage occupied by dead objects and adjusts the storage structure according to the configuration (for blobovnicza only now)",
|
|
||||||
Run: shardsRebuild,
|
|
||||||
}
|
|
||||||
|
|
||||||
func shardsRebuild(cmd *cobra.Command, _ []string) {
|
|
||||||
pk := key.Get(cmd)
|
|
||||||
|
|
||||||
req := &control.StartShardRebuildRequest{
|
|
||||||
Body: &control.StartShardRebuildRequest_Body{
|
|
||||||
Shard_ID: getShardIDList(cmd),
|
|
||||||
TargetFillPercent: getFillPercentValue(cmd),
|
|
||||||
ConcurrencyLimit: getConcurrencyValue(cmd),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
signRequest(cmd, pk, req)
|
|
||||||
|
|
||||||
cli := getClient(cmd, pk)
|
|
||||||
|
|
||||||
var resp *control.StartShardRebuildResponse
|
|
||||||
var err error
|
|
||||||
err = cli.ExecRaw(func(client *rawclient.Client) error {
|
|
||||||
resp, err = control.StartShardRebuild(client, req)
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
|
||||||
|
|
||||||
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
|
|
||||||
|
|
||||||
var success, failed uint
|
|
||||||
for _, res := range resp.GetBody().GetResults() {
|
|
||||||
if res.GetSuccess() {
|
|
||||||
success++
|
|
||||||
cmd.Printf("Shard %s: OK\n", base58.Encode(res.GetShard_ID()))
|
|
||||||
} else {
|
|
||||||
failed++
|
|
||||||
cmd.Printf("Shard %s: failed with error %q\n", base58.Encode(res.GetShard_ID()), res.GetError())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cmd.Printf("Total: %d success, %d failed\n", success, failed)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getFillPercentValue(cmd *cobra.Command) uint32 {
|
|
||||||
v, _ := cmd.Flags().GetUint32(fillPercentFlag)
|
|
||||||
if v <= 0 || v > 100 {
|
|
||||||
commonCmd.ExitOnErr(cmd, "invalid fill_percent value", fmt.Errorf("fill_percent value must be (0, 100], current value: %d", v))
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func getConcurrencyValue(cmd *cobra.Command) uint32 {
|
|
||||||
v, _ := cmd.Flags().GetUint32(concurrencyFlag)
|
|
||||||
if v <= 0 || v > 10000 {
|
|
||||||
commonCmd.ExitOnErr(cmd, "invalid concurrency value", fmt.Errorf("concurrency value must be (0, 10 000], current value: %d", v))
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func initControlShardRebuildCmd() {
|
|
||||||
initControlFlags(shardsRebuildCmd)
|
|
||||||
|
|
||||||
flags := shardsRebuildCmd.Flags()
|
|
||||||
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
|
|
||||||
flags.Bool(shardAllFlag, false, "Process all shards")
|
|
||||||
flags.Uint32(fillPercentFlag, 80, "Target fill percent to reclaim space")
|
|
||||||
flags.Uint32(concurrencyFlag, 20, "Maximum count of concurrently rebuilding files")
|
|
||||||
setShardModeCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
|
|
||||||
}
|
|
|
@ -84,7 +84,7 @@ func setNetmapStatus(cmd *cobra.Command, _ []string) {
|
||||||
body.SetStatus(control.NetmapStatus_MAINTENANCE)
|
body.SetStatus(control.NetmapStatus_MAINTENANCE)
|
||||||
|
|
||||||
if force {
|
if force {
|
||||||
body.SetForceMaintenance(true)
|
body.SetForceMaintenance()
|
||||||
common.PrintVerbose(cmd, "Local maintenance will be forced.")
|
common.PrintVerbose(cmd, "Local maintenance will be forced.")
|
||||||
}
|
}
|
||||||
targetStatus = control.NetmapStatus_MAINTENANCE
|
targetStatus = control.NetmapStatus_MAINTENANCE
|
||||||
|
|
|
@ -19,7 +19,6 @@ func initControlShardsCmd() {
|
||||||
shardsCmd.AddCommand(doctorCmd)
|
shardsCmd.AddCommand(doctorCmd)
|
||||||
shardsCmd.AddCommand(writecacheShardCmd)
|
shardsCmd.AddCommand(writecacheShardCmd)
|
||||||
shardsCmd.AddCommand(shardsDetachCmd)
|
shardsCmd.AddCommand(shardsDetachCmd)
|
||||||
shardsCmd.AddCommand(shardsRebuildCmd)
|
|
||||||
|
|
||||||
initControlShardsListCmd()
|
initControlShardsListCmd()
|
||||||
initControlSetShardModeCmd()
|
initControlSetShardModeCmd()
|
||||||
|
@ -29,5 +28,4 @@ func initControlShardsCmd() {
|
||||||
initControlDoctorCmd()
|
initControlDoctorCmd()
|
||||||
initControlShardsWritecacheCmd()
|
initControlShardsWritecacheCmd()
|
||||||
initControlShardsDetachCmd()
|
initControlShardsDetachCmd()
|
||||||
initControlShardRebuildCmd()
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -61,18 +61,17 @@ func listShards(cmd *cobra.Command, _ []string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) {
|
func prettyPrintShardsJSON(cmd *cobra.Command, ii []*control.ShardInfo) {
|
||||||
out := make([]map[string]any, 0, len(ii))
|
out := make([]map[string]any, 0, len(ii))
|
||||||
for _, i := range ii {
|
for _, i := range ii {
|
||||||
out = append(out, map[string]any{
|
out = append(out, map[string]any{
|
||||||
"shard_id": base58.Encode(i.GetShard_ID()),
|
"shard_id": base58.Encode(i.GetShard_ID()),
|
||||||
"mode": shardModeToString(i.GetMode()),
|
"mode": shardModeToString(i.GetMode()),
|
||||||
"metabase": i.GetMetabasePath(),
|
"metabase": i.GetMetabasePath(),
|
||||||
"blobstor": i.GetBlobstor(),
|
"blobstor": i.GetBlobstor(),
|
||||||
"writecache": i.GetWritecachePath(),
|
"writecache": i.GetWritecachePath(),
|
||||||
"pilorama": i.GetPiloramaPath(),
|
"pilorama": i.GetPiloramaPath(),
|
||||||
"error_count": i.GetErrorCount(),
|
"error_count": i.GetErrorCount(),
|
||||||
"evacuation_in_progress": i.GetEvacuationInProgress(),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -84,7 +83,7 @@ func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) {
|
||||||
cmd.Print(buf.String()) // pretty printer emits newline, so no need for Println
|
cmd.Print(buf.String()) // pretty printer emits newline, so no need for Println
|
||||||
}
|
}
|
||||||
|
|
||||||
func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) {
|
func prettyPrintShards(cmd *cobra.Command, ii []*control.ShardInfo) {
|
||||||
for _, i := range ii {
|
for _, i := range ii {
|
||||||
pathPrinter := func(name, path string) string {
|
pathPrinter := func(name, path string) string {
|
||||||
if path == "" {
|
if path == "" {
|
||||||
|
@ -106,8 +105,7 @@ func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) {
|
||||||
sb.String()+
|
sb.String()+
|
||||||
pathPrinter("Write-cache", i.GetWritecachePath())+
|
pathPrinter("Write-cache", i.GetWritecachePath())+
|
||||||
pathPrinter("Pilorama", i.GetPiloramaPath())+
|
pathPrinter("Pilorama", i.GetPiloramaPath())+
|
||||||
fmt.Sprintf("Error count: %d\n", i.GetErrorCount())+
|
fmt.Sprintf("Error count: %d\n", i.GetErrorCount()),
|
||||||
fmt.Sprintf("Evacuation in progress: %t\n", i.GetEvacuationInProgress()),
|
|
||||||
base58.Encode(i.GetShard_ID()),
|
base58.Encode(i.GetShard_ID()),
|
||||||
shardModeToString(i.GetMode()),
|
shardModeToString(i.GetMode()),
|
||||||
)
|
)
|
||||||
|
@ -123,7 +121,7 @@ func shardModeToString(m control.ShardMode) string {
|
||||||
return "unknown"
|
return "unknown"
|
||||||
}
|
}
|
||||||
|
|
||||||
func sortShardsByID(ii []control.ShardInfo) {
|
func sortShardsByID(ii []*control.ShardInfo) {
|
||||||
sort.Slice(ii, func(i, j int) bool {
|
sort.Slice(ii, func(i, j int) bool {
|
||||||
return bytes.Compare(ii[i].GetShard_ID(), ii[j].GetShard_ID()) < 0
|
return bytes.Compare(ii[i].GetShard_ID(), ii[j].GetShard_ID()) < 0
|
||||||
})
|
})
|
||||||
|
|
|
@ -117,10 +117,10 @@ func setShardMode(cmd *cobra.Command, _ []string) {
|
||||||
req.SetBody(body)
|
req.SetBody(body)
|
||||||
|
|
||||||
body.SetMode(mode)
|
body.SetMode(mode)
|
||||||
body.SetShard_ID(getShardIDList(cmd))
|
body.SetShardIDList(getShardIDList(cmd))
|
||||||
|
|
||||||
reset, _ := cmd.Flags().GetBool(shardClearErrorsFlag)
|
reset, _ := cmd.Flags().GetBool(shardClearErrorsFlag)
|
||||||
body.SetResetErrorCounter(reset)
|
body.ClearErrorCounter(reset)
|
||||||
|
|
||||||
signRequest(cmd, pk, req)
|
signRequest(cmd, pk, req)
|
||||||
|
|
||||||
|
|
|
@ -44,7 +44,7 @@ func verifyResponse(cmd *cobra.Command,
|
||||||
GetSign() []byte
|
GetSign() []byte
|
||||||
},
|
},
|
||||||
body interface {
|
body interface {
|
||||||
MarshalProtobuf([]byte) []byte
|
StableMarshal([]byte) []byte
|
||||||
},
|
},
|
||||||
) {
|
) {
|
||||||
if sigControl == nil {
|
if sigControl == nil {
|
||||||
|
@ -60,7 +60,7 @@ func verifyResponse(cmd *cobra.Command,
|
||||||
var sig frostfscrypto.Signature
|
var sig frostfscrypto.Signature
|
||||||
commonCmd.ExitOnErr(cmd, "can't read signature: %w", sig.ReadFromV2(sigV2))
|
commonCmd.ExitOnErr(cmd, "can't read signature: %w", sig.ReadFromV2(sigV2))
|
||||||
|
|
||||||
if !sig.Verify(body.MarshalProtobuf(nil)) {
|
if !sig.Verify(body.StableMarshal(nil)) {
|
||||||
commonCmd.ExitOnErr(cmd, "", errors.New("invalid response signature"))
|
commonCmd.ExitOnErr(cmd, "", errors.New("invalid response signature"))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,12 +9,6 @@ import (
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
asyncFlag = "async"
|
|
||||||
restoreModeFlag = "restore-mode"
|
|
||||||
shrinkFlag = "shrink"
|
|
||||||
)
|
|
||||||
|
|
||||||
var writecacheShardCmd = &cobra.Command{
|
var writecacheShardCmd = &cobra.Command{
|
||||||
Use: "writecache",
|
Use: "writecache",
|
||||||
Short: "Operations with storage node's write-cache",
|
Short: "Operations with storage node's write-cache",
|
||||||
|
@ -32,16 +26,10 @@ func sealWritecache(cmd *cobra.Command, _ []string) {
|
||||||
pk := key.Get(cmd)
|
pk := key.Get(cmd)
|
||||||
|
|
||||||
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
|
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
|
||||||
async, _ := cmd.Flags().GetBool(asyncFlag)
|
|
||||||
restoreMode, _ := cmd.Flags().GetBool(restoreModeFlag)
|
|
||||||
shrink, _ := cmd.Flags().GetBool(shrinkFlag)
|
|
||||||
|
|
||||||
req := &control.SealWriteCacheRequest{Body: &control.SealWriteCacheRequest_Body{
|
req := &control.SealWriteCacheRequest{Body: &control.SealWriteCacheRequest_Body{
|
||||||
Shard_ID: getShardIDList(cmd),
|
Shard_ID: getShardIDList(cmd),
|
||||||
IgnoreErrors: ignoreErrors,
|
IgnoreErrors: ignoreErrors,
|
||||||
Async: async,
|
|
||||||
RestoreMode: restoreMode,
|
|
||||||
Shrink: shrink,
|
|
||||||
}}
|
}}
|
||||||
|
|
||||||
signRequest(cmd, pk, req)
|
signRequest(cmd, pk, req)
|
||||||
|
@ -80,9 +68,6 @@ func initControlShardsWritecacheCmd() {
|
||||||
ff.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
|
ff.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
|
||||||
ff.Bool(shardAllFlag, false, "Process all shards")
|
ff.Bool(shardAllFlag, false, "Process all shards")
|
||||||
ff.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects")
|
ff.Bool(ignoreErrorsFlag, true, "Skip invalid/unreadable objects")
|
||||||
ff.Bool(asyncFlag, false, "Run operation in background")
|
|
||||||
ff.Bool(restoreModeFlag, false, "Restore writecache's mode after sealing")
|
|
||||||
ff.Bool(shrinkFlag, false, "Shrink writecache's internal storage")
|
|
||||||
|
|
||||||
sealWritecacheShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
|
sealWritecacheShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
|
||||||
}
|
}
|
||||||
|
|
|
@ -49,14 +49,14 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) {
|
||||||
cmd.Println("key:", hex.EncodeToString(i.PublicKey()))
|
cmd.Println("key:", hex.EncodeToString(i.PublicKey()))
|
||||||
|
|
||||||
var stateWord string
|
var stateWord string
|
||||||
switch i.Status() {
|
switch {
|
||||||
default:
|
default:
|
||||||
stateWord = "<undefined>"
|
stateWord = "<undefined>"
|
||||||
case netmap.Online:
|
case i.IsOnline():
|
||||||
stateWord = "online"
|
stateWord = "online"
|
||||||
case netmap.Offline:
|
case i.IsOffline():
|
||||||
stateWord = "offline"
|
stateWord = "offline"
|
||||||
case netmap.Maintenance:
|
case i.IsMaintenance():
|
||||||
stateWord = "maintenance"
|
stateWord = "maintenance"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -30,8 +30,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
verifyPresenceAllFlag = "verify-presence-all"
|
verifyPresenceAllFlag = "verify-presence-all"
|
||||||
preferInternalAddressesFlag = "prefer-internal-addresses"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -98,7 +97,6 @@ func initObjectNodesCmd() {
|
||||||
|
|
||||||
flags.Bool(verifyPresenceAllFlag, false, "Verify the actual presence of the object on all netmap nodes.")
|
flags.Bool(verifyPresenceAllFlag, false, "Verify the actual presence of the object on all netmap nodes.")
|
||||||
flags.Bool(commonflags.JSON, false, "Print information about the object placement as json.")
|
flags.Bool(commonflags.JSON, false, "Print information about the object placement as json.")
|
||||||
flags.Bool(preferInternalAddressesFlag, false, "Use internal addresses first to get object info.")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func objectNodes(cmd *cobra.Command, _ []string) {
|
func objectNodes(cmd *cobra.Command, _ []string) {
|
||||||
|
@ -172,7 +170,7 @@ func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *
|
||||||
func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID {
|
func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID {
|
||||||
splitInfo := errSplitInfo.SplitInfo()
|
splitInfo := errSplitInfo.SplitInfo()
|
||||||
|
|
||||||
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok {
|
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID, false); ok {
|
||||||
return members
|
return members
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -185,7 +183,6 @@ func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli
|
||||||
|
|
||||||
func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject {
|
func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject {
|
||||||
result := make([]phyObject, 0, len(members))
|
result := make([]phyObject, 0, len(members))
|
||||||
var hasNonEC, hasEC bool
|
|
||||||
var resultGuard sync.Mutex
|
var resultGuard sync.Mutex
|
||||||
|
|
||||||
if len(members) == 0 {
|
if len(members) == 0 {
|
||||||
|
@ -194,8 +191,31 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member
|
||||||
|
|
||||||
prmHead.SetRawFlag(true) // to get an error instead of whole object
|
prmHead.SetRawFlag(true) // to get an error instead of whole object
|
||||||
|
|
||||||
|
first := members[0]
|
||||||
|
var addrObj oid.Address
|
||||||
|
addrObj.SetContainer(cnrID)
|
||||||
|
addrObj.SetObject(first)
|
||||||
|
prmHead.SetAddress(addrObj)
|
||||||
|
|
||||||
|
_, err := internalclient.HeadObject(cmd.Context(), prmHead)
|
||||||
|
var ecInfoError *objectSDK.ECInfoError
|
||||||
|
if errors.As(err, &ecInfoError) {
|
||||||
|
chunks := getECObjectChunks(cmd, cnrID, first, ecInfoError)
|
||||||
|
result = append(result, chunks...)
|
||||||
|
} else if err == nil { // not EC object, so all members must be phy objects
|
||||||
|
for _, member := range members {
|
||||||
|
result = append(result, phyObject{
|
||||||
|
containerID: cnrID,
|
||||||
|
objectID: member,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
} else {
|
||||||
|
commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
eg, egCtx := errgroup.WithContext(cmd.Context())
|
eg, egCtx := errgroup.WithContext(cmd.Context())
|
||||||
for idx := range len(members) {
|
for idx := 1; idx < len(members); idx++ {
|
||||||
partObjID := members[idx]
|
partObjID := members[idx]
|
||||||
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
|
@ -205,44 +225,24 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member
|
||||||
partAddr.SetObject(partObjID)
|
partAddr.SetObject(partObjID)
|
||||||
partHeadPrm.SetAddress(partAddr)
|
partHeadPrm.SetAddress(partAddr)
|
||||||
|
|
||||||
obj, err := internalclient.HeadObject(egCtx, partHeadPrm)
|
_, err := internalclient.HeadObject(egCtx, partHeadPrm)
|
||||||
if err != nil {
|
var ecInfoError *objectSDK.ECInfoError
|
||||||
var ecInfoError *objectSDK.ECInfoError
|
if errors.As(err, &ecInfoError) {
|
||||||
if errors.As(err, &ecInfoError) {
|
chunks := getECObjectChunks(cmd, cnrID, partObjID, ecInfoError)
|
||||||
resultGuard.Lock()
|
|
||||||
defer resultGuard.Unlock()
|
|
||||||
result = append(result, getECObjectChunks(cmd, cnrID, partObjID, ecInfoError)...)
|
|
||||||
hasEC = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if obj.Header().Type() != objectSDK.TypeRegular {
|
resultGuard.Lock()
|
||||||
commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", fmt.Errorf("object '%s' with type '%s' is not supported as part of complex object", partAddr, obj.Header().Type()))
|
defer resultGuard.Unlock()
|
||||||
}
|
result = append(result, chunks...)
|
||||||
|
|
||||||
if len(obj.Header().Children()) > 0 {
|
|
||||||
// linking object is not data object, so skip it
|
|
||||||
return nil
|
return nil
|
||||||
|
} else if err == nil {
|
||||||
|
return errMalformedComplexObject
|
||||||
}
|
}
|
||||||
|
return err
|
||||||
resultGuard.Lock()
|
|
||||||
defer resultGuard.Unlock()
|
|
||||||
result = append(result, phyObject{
|
|
||||||
containerID: cnrID,
|
|
||||||
objectID: partObjID,
|
|
||||||
})
|
|
||||||
hasNonEC = true
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", eg.Wait())
|
commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", eg.Wait())
|
||||||
if hasEC && hasNonEC {
|
|
||||||
commonCmd.ExitOnErr(cmd, "failed to flatten parts of complex object: %w", errMalformedComplexObject)
|
|
||||||
}
|
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -393,6 +393,8 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
|
||||||
|
|
||||||
eg, egCtx := errgroup.WithContext(cmd.Context())
|
eg, egCtx := errgroup.WithContext(cmd.Context())
|
||||||
for _, cand := range candidates {
|
for _, cand := range candidates {
|
||||||
|
cand := cand
|
||||||
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
cli, err := createClient(egCtx, cmd, cand, pk)
|
cli, err := createClient(egCtx, cmd, cand, pk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -403,6 +405,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, object := range objects {
|
for _, object := range objects {
|
||||||
|
object := object
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
|
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
|
||||||
resultMtx.Lock()
|
resultMtx.Lock()
|
||||||
|
@ -446,20 +449,11 @@ func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap
|
||||||
func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.NodeInfo, pk *ecdsa.PrivateKey) (*client.Client, error) {
|
func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.NodeInfo, pk *ecdsa.PrivateKey) (*client.Client, error) {
|
||||||
var cli *client.Client
|
var cli *client.Client
|
||||||
var addresses []string
|
var addresses []string
|
||||||
if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal {
|
candidate.IterateNetworkEndpoints(func(s string) bool {
|
||||||
candidate.IterateNetworkEndpoints(func(s string) bool {
|
addresses = append(addresses, s)
|
||||||
addresses = append(addresses, s)
|
return false
|
||||||
return false
|
})
|
||||||
})
|
addresses = append(addresses, candidate.ExternalAddresses()...)
|
||||||
addresses = append(addresses, candidate.ExternalAddresses()...)
|
|
||||||
} else {
|
|
||||||
addresses = append(addresses, candidate.ExternalAddresses()...)
|
|
||||||
candidate.IterateNetworkEndpoints(func(s string) bool {
|
|
||||||
addresses = append(addresses, s)
|
|
||||||
return false
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
var lastErr error
|
var lastErr error
|
||||||
for _, address := range addresses {
|
for _, address := range addresses {
|
||||||
var networkAddr network.Address
|
var networkAddr network.Address
|
||||||
|
|
|
@ -1,151 +0,0 @@
|
||||||
package object
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
|
||||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
newAttrsFlagName = "new-attrs"
|
|
||||||
replaceAttrsFlagName = "replace-attrs"
|
|
||||||
rangeFlagName = "range"
|
|
||||||
payloadFlagName = "payload"
|
|
||||||
)
|
|
||||||
|
|
||||||
var objectPatchCmd = &cobra.Command{
|
|
||||||
Use: "patch",
|
|
||||||
Run: patch,
|
|
||||||
Short: "Patch FrostFS object",
|
|
||||||
Long: "Patch FrostFS object. Each range passed to the command requires to pass a corresponding patch payload.",
|
|
||||||
Example: `
|
|
||||||
frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <CID> --oid <OID> --new-attrs 'key1=val1,key2=val2' --replace-attrs
|
|
||||||
frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <CID> --oid <OID> --range offX:lnX --payload /path/to/payloadX --range offY:lnY --payload /path/to/payloadY
|
|
||||||
frostfs-cli -c config.yml -r 127.0.0.1:8080 object patch --cid <CID> --oid <OID> --new-attrs 'key1=val1,key2=val2' --replace-attrs --range offX:lnX --payload /path/to/payload
|
|
||||||
`,
|
|
||||||
}
|
|
||||||
|
|
||||||
func initObjectPatchCmd() {
|
|
||||||
commonflags.Init(objectPatchCmd)
|
|
||||||
initFlagSession(objectPatchCmd, "PATCH")
|
|
||||||
|
|
||||||
flags := objectPatchCmd.Flags()
|
|
||||||
|
|
||||||
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
|
||||||
_ = objectRangeCmd.MarkFlagRequired(commonflags.CIDFlag)
|
|
||||||
|
|
||||||
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
|
|
||||||
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
|
|
||||||
|
|
||||||
flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
|
|
||||||
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
|
|
||||||
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
|
|
||||||
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
|
|
||||||
}
|
|
||||||
|
|
||||||
func patch(cmd *cobra.Command, _ []string) {
|
|
||||||
var cnr cid.ID
|
|
||||||
var obj oid.ID
|
|
||||||
|
|
||||||
objAddr := readObjectAddress(cmd, &cnr, &obj)
|
|
||||||
|
|
||||||
ranges, err := getRangeSlice(cmd)
|
|
||||||
commonCmd.ExitOnErr(cmd, "", err)
|
|
||||||
|
|
||||||
payloads := patchPayloadPaths(cmd)
|
|
||||||
|
|
||||||
if len(ranges) != len(payloads) {
|
|
||||||
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("the number of ranges and payloads are not equal: ranges = %d, payloads = %d", len(ranges), len(payloads)))
|
|
||||||
}
|
|
||||||
|
|
||||||
newAttrs, err := parseNewObjectAttrs(cmd)
|
|
||||||
commonCmd.ExitOnErr(cmd, "can't parse new object attributes: %w", err)
|
|
||||||
replaceAttrs, _ := cmd.Flags().GetBool(replaceAttrsFlagName)
|
|
||||||
|
|
||||||
pk := key.GetOrGenerate(cmd)
|
|
||||||
|
|
||||||
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
|
|
||||||
|
|
||||||
var prm internalclient.PatchObjectPrm
|
|
||||||
prm.SetClient(cli)
|
|
||||||
Prepare(cmd, &prm)
|
|
||||||
ReadOrOpenSession(cmd, &prm, pk, cnr, nil)
|
|
||||||
|
|
||||||
prm.SetAddress(objAddr)
|
|
||||||
prm.NewAttributes = newAttrs
|
|
||||||
prm.ReplaceAttribute = replaceAttrs
|
|
||||||
|
|
||||||
for i := range ranges {
|
|
||||||
prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
|
|
||||||
Range: ranges[i],
|
|
||||||
PayloadPath: payloads[i],
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err := internalclient.Patch(cmd.Context(), prm)
|
|
||||||
if err != nil {
|
|
||||||
commonCmd.ExitOnErr(cmd, "can't patch the object: %w", err)
|
|
||||||
}
|
|
||||||
cmd.Println("Patched object ID: ", res.OID.EncodeToString())
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
|
|
||||||
var rawAttrs []string
|
|
||||||
|
|
||||||
raw := cmd.Flag(newAttrsFlagName).Value.String()
|
|
||||||
if len(raw) != 0 {
|
|
||||||
rawAttrs = strings.Split(raw, ",")
|
|
||||||
}
|
|
||||||
|
|
||||||
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
|
|
||||||
for i := range rawAttrs {
|
|
||||||
k, v, found := strings.Cut(rawAttrs[i], "=")
|
|
||||||
if !found {
|
|
||||||
return nil, fmt.Errorf("invalid attribute format: %s", rawAttrs[i])
|
|
||||||
}
|
|
||||||
attrs[i].SetKey(k)
|
|
||||||
attrs[i].SetValue(v)
|
|
||||||
}
|
|
||||||
return attrs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getRangeSlice(cmd *cobra.Command) ([]objectSDK.Range, error) {
|
|
||||||
v, _ := cmd.Flags().GetStringSlice(rangeFlagName)
|
|
||||||
if len(v) == 0 {
|
|
||||||
return []objectSDK.Range{}, nil
|
|
||||||
}
|
|
||||||
rs := make([]objectSDK.Range, len(v))
|
|
||||||
for i := range v {
|
|
||||||
before, after, found := strings.Cut(v[i], rangeSep)
|
|
||||||
if !found {
|
|
||||||
return nil, fmt.Errorf("invalid range specifier: %s", v[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
offset, err := strconv.ParseUint(before, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid '%s' range offset specifier: %w", v[i], err)
|
|
||||||
}
|
|
||||||
length, err := strconv.ParseUint(after, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid '%s' range length specifier: %w", v[i], err)
|
|
||||||
}
|
|
||||||
|
|
||||||
rs[i].SetOffset(offset)
|
|
||||||
rs[i].SetLength(length)
|
|
||||||
}
|
|
||||||
return rs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func patchPayloadPaths(cmd *cobra.Command) []string {
|
|
||||||
v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
|
|
||||||
return v
|
|
||||||
}
|
|
|
@ -29,7 +29,6 @@ func init() {
|
||||||
objectRangeCmd,
|
objectRangeCmd,
|
||||||
objectLockCmd,
|
objectLockCmd,
|
||||||
objectNodesCmd,
|
objectNodesCmd,
|
||||||
objectPatchCmd,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Cmd.AddCommand(objectChildCommands...)
|
Cmd.AddCommand(objectChildCommands...)
|
||||||
|
@ -40,7 +39,6 @@ func init() {
|
||||||
}
|
}
|
||||||
|
|
||||||
initObjectPutCmd()
|
initObjectPutCmd()
|
||||||
initObjectPatchCmd()
|
|
||||||
initObjectDeleteCmd()
|
initObjectDeleteCmd()
|
||||||
initObjectGetCmd()
|
initObjectGetCmd()
|
||||||
initObjectSearchCmd()
|
initObjectSearchCmd()
|
||||||
|
|
|
@ -306,8 +306,6 @@ func finalizeSession(cmd *cobra.Command, dst SessionPrm, tok *session.Object, ke
|
||||||
case *internal.PutObjectPrm:
|
case *internal.PutObjectPrm:
|
||||||
common.PrintVerbose(cmd, "Binding session to object PUT...")
|
common.PrintVerbose(cmd, "Binding session to object PUT...")
|
||||||
tok.ForVerb(session.VerbObjectPut)
|
tok.ForVerb(session.VerbObjectPut)
|
||||||
case *internal.PatchObjectPrm:
|
|
||||||
tok.ForVerb(session.VerbObjectPatch)
|
|
||||||
case *internal.DeleteObjectPrm:
|
case *internal.DeleteObjectPrm:
|
||||||
common.PrintVerbose(cmd, "Binding session to object DELETE...")
|
common.PrintVerbose(cmd, "Binding session to object DELETE...")
|
||||||
tok.ForVerb(session.VerbObjectDelete)
|
tok.ForVerb(session.VerbObjectDelete)
|
||||||
|
@ -374,7 +372,7 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID,
|
||||||
common.PrintVerbose(cmd, "Split information received - object is virtual.")
|
common.PrintVerbose(cmd, "Split information received - object is virtual.")
|
||||||
splitInfo := errSplit.SplitInfo()
|
splitInfo := errSplit.SplitInfo()
|
||||||
|
|
||||||
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnr); ok {
|
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnr, true); ok {
|
||||||
return members
|
return members
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -390,7 +388,7 @@ func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID,
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID) ([]oid.ID, bool) {
|
func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.SplitInfo, prmHead internal.HeadObjectPrm, cnr cid.ID, withLinking bool) ([]oid.ID, bool) {
|
||||||
// collect split chain by the descending ease of operations (ease is evaluated heuristically).
|
// collect split chain by the descending ease of operations (ease is evaluated heuristically).
|
||||||
// If any approach fails, we don't try the next since we assume that it will fail too.
|
// If any approach fails, we don't try the next since we assume that it will fail too.
|
||||||
|
|
||||||
|
@ -411,7 +409,10 @@ func tryGetSplitMembersByLinkingObject(cmd *cobra.Command, splitInfo *objectSDK.
|
||||||
|
|
||||||
common.PrintVerbose(cmd, "Received split members from the linking object: %v", children)
|
common.PrintVerbose(cmd, "Received split members from the linking object: %v", children)
|
||||||
|
|
||||||
return append(children, idLinking), true
|
if withLinking {
|
||||||
|
return append(children, idLinking), true
|
||||||
|
}
|
||||||
|
return children, true
|
||||||
}
|
}
|
||||||
|
|
||||||
// linking object is not required for
|
// linking object is not required for
|
||||||
|
|
|
@ -47,10 +47,9 @@ func add(cmd *cobra.Command, _ []string) {
|
||||||
meta, err := parseMeta(cmd)
|
meta, err := parseMeta(cmd)
|
||||||
commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err)
|
commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err)
|
||||||
|
|
||||||
ctx, cancel := contextWithTimeout(cmd)
|
ctx := cmd.Context()
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cli, err := _client()
|
cli, err := _client(ctx)
|
||||||
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
||||||
|
|
||||||
rawCID := make([]byte, sha256.Size)
|
rawCID := make([]byte, sha256.Size)
|
||||||
|
@ -78,13 +77,13 @@ func add(cmd *cobra.Command, _ []string) {
|
||||||
cmd.Println("Node ID: ", resp.GetBody().GetNodeId())
|
cmd.Println("Node ID: ", resp.GetBody().GetNodeId())
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseMeta(cmd *cobra.Command) ([]tree.KeyValue, error) {
|
func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) {
|
||||||
raws, _ := cmd.Flags().GetStringSlice(metaFlagKey)
|
raws, _ := cmd.Flags().GetStringSlice(metaFlagKey)
|
||||||
if len(raws) == 0 {
|
if len(raws) == 0 {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
pairs := make([]tree.KeyValue, 0, len(raws))
|
pairs := make([]*tree.KeyValue, 0, len(raws))
|
||||||
for i := range raws {
|
for i := range raws {
|
||||||
k, v, found := strings.Cut(raws[i], "=")
|
k, v, found := strings.Cut(raws[i], "=")
|
||||||
if !found {
|
if !found {
|
||||||
|
@ -95,7 +94,7 @@ func parseMeta(cmd *cobra.Command) ([]tree.KeyValue, error) {
|
||||||
pair.Key = k
|
pair.Key = k
|
||||||
pair.Value = []byte(v)
|
pair.Value = []byte(v)
|
||||||
|
|
||||||
pairs = append(pairs, pair)
|
pairs = append(pairs, &pair)
|
||||||
}
|
}
|
||||||
|
|
||||||
return pairs, nil
|
return pairs, nil
|
||||||
|
|
|
@ -50,10 +50,9 @@ func addByPath(cmd *cobra.Command, _ []string) {
|
||||||
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
||||||
|
|
||||||
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
|
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
|
||||||
ctx, cancel := contextWithTimeout(cmd)
|
ctx := cmd.Context()
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cli, err := _client()
|
cli, err := _client(ctx)
|
||||||
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
||||||
|
|
||||||
rawCID := make([]byte, sha256.Size)
|
rawCID := make([]byte, sha256.Size)
|
||||||
|
|
|
@ -3,14 +3,13 @@ package tree
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
|
||||||
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
|
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
|
||||||
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/credentials/insecure"
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
|
@ -18,7 +17,7 @@ import (
|
||||||
|
|
||||||
// _client returns grpc Tree service client. Should be removed
|
// _client returns grpc Tree service client. Should be removed
|
||||||
// after making Tree API public.
|
// after making Tree API public.
|
||||||
func _client() (tree.TreeServiceClient, error) {
|
func _client(ctx context.Context) (tree.TreeServiceClient, error) {
|
||||||
var netAddr network.Address
|
var netAddr network.Address
|
||||||
err := netAddr.FromString(viper.GetString(commonflags.RPC))
|
err := netAddr.FromString(viper.GetString(commonflags.RPC))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -26,6 +25,7 @@ func _client() (tree.TreeServiceClient, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
opts := []grpc.DialOption{
|
opts := []grpc.DialOption{
|
||||||
|
grpc.WithBlock(),
|
||||||
grpc.WithChainUnaryInterceptor(
|
grpc.WithChainUnaryInterceptor(
|
||||||
metrics.NewUnaryClientInterceptor(),
|
metrics.NewUnaryClientInterceptor(),
|
||||||
tracing.NewUnaryClientInteceptor(),
|
tracing.NewUnaryClientInteceptor(),
|
||||||
|
@ -40,14 +40,12 @@ func _client() (tree.TreeServiceClient, error) {
|
||||||
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||||
}
|
}
|
||||||
|
|
||||||
cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
|
// a default connection establishing timeout
|
||||||
|
const defaultClientConnectTimeout = time.Second * 2
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
|
||||||
|
cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...)
|
||||||
|
cancel()
|
||||||
|
|
||||||
return tree.NewTreeServiceClient(cc), err
|
return tree.NewTreeServiceClient(cc), err
|
||||||
}
|
}
|
||||||
|
|
||||||
func contextWithTimeout(cmd *cobra.Command) (context.Context, context.CancelFunc) {
|
|
||||||
if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 {
|
|
||||||
common.PrintVerbose(cmd, "Set request timeout to %s.", timeout)
|
|
||||||
return context.WithTimeout(cmd.Context(), timeout)
|
|
||||||
}
|
|
||||||
return context.WithTimeout(cmd.Context(), commonflags.TimeoutDefault)
|
|
||||||
}
|
|
||||||
|
|
|
@ -50,10 +50,9 @@ func getByPath(cmd *cobra.Command, _ []string) {
|
||||||
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
||||||
|
|
||||||
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
|
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
|
||||||
ctx, cancel := contextWithTimeout(cmd)
|
ctx := cmd.Context()
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cli, err := _client()
|
cli, err := _client(ctx)
|
||||||
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
||||||
|
|
||||||
rawCID := make([]byte, sha256.Size)
|
rawCID := make([]byte, sha256.Size)
|
||||||
|
|
|
@ -44,10 +44,9 @@ func getOpLog(cmd *cobra.Command, _ []string) {
|
||||||
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
||||||
|
|
||||||
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
|
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
|
||||||
ctx, cancel := contextWithTimeout(cmd)
|
ctx := cmd.Context()
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cli, err := _client()
|
cli, err := _client(ctx)
|
||||||
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
||||||
|
|
||||||
rawCID := make([]byte, sha256.Size)
|
rawCID := make([]byte, sha256.Size)
|
||||||
|
|
|
@ -26,10 +26,9 @@ func initHealthcheckCmd() {
|
||||||
|
|
||||||
func healthcheck(cmd *cobra.Command, _ []string) {
|
func healthcheck(cmd *cobra.Command, _ []string) {
|
||||||
pk := key.GetOrGenerate(cmd)
|
pk := key.GetOrGenerate(cmd)
|
||||||
ctx, cancel := contextWithTimeout(cmd)
|
ctx := cmd.Context()
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cli, err := _client()
|
cli, err := _client(ctx)
|
||||||
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
||||||
|
|
||||||
req := &tree.HealthcheckRequest{
|
req := &tree.HealthcheckRequest{
|
||||||
|
|
|
@ -38,10 +38,9 @@ func list(cmd *cobra.Command, _ []string) {
|
||||||
err := cnr.DecodeString(cidString)
|
err := cnr.DecodeString(cidString)
|
||||||
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
||||||
|
|
||||||
ctx, cancel := contextWithTimeout(cmd)
|
ctx := cmd.Context()
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cli, err := _client()
|
cli, err := _client(ctx)
|
||||||
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
||||||
|
|
||||||
rawCID := make([]byte, sha256.Size)
|
rawCID := make([]byte, sha256.Size)
|
||||||
|
|
|
@ -45,10 +45,9 @@ func move(cmd *cobra.Command, _ []string) {
|
||||||
err := cnr.DecodeString(cidString)
|
err := cnr.DecodeString(cidString)
|
||||||
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
||||||
|
|
||||||
ctx, cancel := contextWithTimeout(cmd)
|
ctx := cmd.Context()
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cli, err := _client()
|
cli, err := _client(ctx)
|
||||||
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
||||||
|
|
||||||
rawCID := make([]byte, sha256.Size)
|
rawCID := make([]byte, sha256.Size)
|
||||||
|
@ -76,7 +75,7 @@ func move(cmd *cobra.Command, _ []string) {
|
||||||
resp, err := cli.GetSubTree(ctx, subTreeReq)
|
resp, err := cli.GetSubTree(ctx, subTreeReq)
|
||||||
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
|
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
|
||||||
|
|
||||||
var meta []tree.KeyValue
|
var meta []*tree.KeyValue
|
||||||
subtreeResp, err := resp.Recv()
|
subtreeResp, err := resp.Recv()
|
||||||
for ; err == nil; subtreeResp, err = resp.Recv() {
|
for ; err == nil; subtreeResp, err = resp.Recv() {
|
||||||
meta = subtreeResp.GetBody().GetMeta()
|
meta = subtreeResp.GetBody().GetMeta()
|
||||||
|
|
|
@ -41,10 +41,9 @@ func remove(cmd *cobra.Command, _ []string) {
|
||||||
err := cnr.DecodeString(cidString)
|
err := cnr.DecodeString(cidString)
|
||||||
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
||||||
|
|
||||||
ctx, cancel := contextWithTimeout(cmd)
|
ctx := cmd.Context()
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cli, err := _client()
|
cli, err := _client(ctx)
|
||||||
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
||||||
|
|
||||||
rawCID := make([]byte, sha256.Size)
|
rawCID := make([]byte, sha256.Size)
|
||||||
|
|
|
@ -49,7 +49,6 @@ const (
|
||||||
heightFlagKey = "height"
|
heightFlagKey = "height"
|
||||||
countFlagKey = "count"
|
countFlagKey = "count"
|
||||||
depthFlagKey = "depth"
|
depthFlagKey = "depth"
|
||||||
orderFlagKey = "ordered"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func initCTID(cmd *cobra.Command) {
|
func initCTID(cmd *cobra.Command) {
|
||||||
|
|
|
@ -30,7 +30,6 @@ func initGetSubtreeCmd() {
|
||||||
ff := getSubtreeCmd.Flags()
|
ff := getSubtreeCmd.Flags()
|
||||||
ff.Uint64(rootIDFlagKey, 0, "Root ID to traverse from.")
|
ff.Uint64(rootIDFlagKey, 0, "Root ID to traverse from.")
|
||||||
ff.Uint32(depthFlagKey, 10, "Traversal depth.")
|
ff.Uint32(depthFlagKey, 10, "Traversal depth.")
|
||||||
ff.Bool(orderFlagKey, false, "Sort output by ascending FileName.")
|
|
||||||
|
|
||||||
_ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag)
|
_ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag)
|
||||||
_ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey)
|
_ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey)
|
||||||
|
@ -46,10 +45,9 @@ func getSubTree(cmd *cobra.Command, _ []string) {
|
||||||
err := cnr.DecodeString(cidString)
|
err := cnr.DecodeString(cidString)
|
||||||
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
|
||||||
|
|
||||||
ctx, cancel := contextWithTimeout(cmd)
|
ctx := cmd.Context()
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cli, err := _client()
|
cli, err := _client(ctx)
|
||||||
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
|
||||||
|
|
||||||
rawCID := make([]byte, sha256.Size)
|
rawCID := make([]byte, sha256.Size)
|
||||||
|
@ -61,13 +59,6 @@ func getSubTree(cmd *cobra.Command, _ []string) {
|
||||||
|
|
||||||
depth, _ := cmd.Flags().GetUint32(depthFlagKey)
|
depth, _ := cmd.Flags().GetUint32(depthFlagKey)
|
||||||
|
|
||||||
order, _ := cmd.Flags().GetBool(orderFlagKey)
|
|
||||||
|
|
||||||
bodyOrder := tree.GetSubTreeRequest_Body_Order_None
|
|
||||||
if order {
|
|
||||||
bodyOrder = tree.GetSubTreeRequest_Body_Order_Asc
|
|
||||||
}
|
|
||||||
|
|
||||||
var bt []byte
|
var bt []byte
|
||||||
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
|
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
|
||||||
bt = t.Marshal()
|
bt = t.Marshal()
|
||||||
|
@ -80,9 +71,6 @@ func getSubTree(cmd *cobra.Command, _ []string) {
|
||||||
RootId: []uint64{rid},
|
RootId: []uint64{rid},
|
||||||
Depth: depth,
|
Depth: depth,
|
||||||
BearerToken: bt,
|
BearerToken: bt,
|
||||||
OrderBy: &tree.GetSubTreeRequest_Body_Order{
|
|
||||||
Direction: bodyOrder,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -33,7 +33,7 @@ func PrettyPrintTableBACL(cmd *cobra.Command, bacl *acl.Basic) {
|
||||||
fmt.Fprintln(w, strings.Join(bits, "\t"))
|
fmt.Fprintln(w, strings.Join(bits, "\t"))
|
||||||
// Footer
|
// Footer
|
||||||
footer := []string{"X F"}
|
footer := []string{"X F"}
|
||||||
for range 7 {
|
for i := 0; i < 7; i++ {
|
||||||
footer = append(footer, "U S O B")
|
footer = append(footer, "U S O B")
|
||||||
}
|
}
|
||||||
fmt.Fprintln(w, strings.Join(footer, "\t"))
|
fmt.Fprintln(w, strings.Join(footer, "\t"))
|
||||||
|
|
|
@ -239,8 +239,6 @@ func parseAction(lexeme string) ([]string, bool, error) {
|
||||||
return []string{nativeschema.MethodRangeObject}, true, nil
|
return []string{nativeschema.MethodRangeObject}, true, nil
|
||||||
case "object.hash":
|
case "object.hash":
|
||||||
return []string{nativeschema.MethodHashObject}, true, nil
|
return []string{nativeschema.MethodHashObject}, true, nil
|
||||||
case "object.patch":
|
|
||||||
return []string{nativeschema.MethodPatchObject}, true, nil
|
|
||||||
case "object.*":
|
case "object.*":
|
||||||
return []string{
|
return []string{
|
||||||
nativeschema.MethodPutObject,
|
nativeschema.MethodPutObject,
|
||||||
|
@ -250,7 +248,6 @@ func parseAction(lexeme string) ([]string, bool, error) {
|
||||||
nativeschema.MethodSearchObject,
|
nativeschema.MethodSearchObject,
|
||||||
nativeschema.MethodRangeObject,
|
nativeschema.MethodRangeObject,
|
||||||
nativeschema.MethodHashObject,
|
nativeschema.MethodHashObject,
|
||||||
nativeschema.MethodPatchObject,
|
|
||||||
}, true, nil
|
}, true, nil
|
||||||
case "container.put":
|
case "container.put":
|
||||||
return []string{nativeschema.MethodPutContainer}, false, nil
|
return []string{nativeschema.MethodPutContainer}, false, nil
|
||||||
|
@ -258,6 +255,10 @@ func parseAction(lexeme string) ([]string, bool, error) {
|
||||||
return []string{nativeschema.MethodDeleteContainer}, false, nil
|
return []string{nativeschema.MethodDeleteContainer}, false, nil
|
||||||
case "container.get":
|
case "container.get":
|
||||||
return []string{nativeschema.MethodGetContainer}, false, nil
|
return []string{nativeschema.MethodGetContainer}, false, nil
|
||||||
|
case "container.setcontainereacl":
|
||||||
|
return []string{nativeschema.MethodSetContainerEACL}, false, nil
|
||||||
|
case "container.getcontainereacl":
|
||||||
|
return []string{nativeschema.MethodGetContainerEACL}, false, nil
|
||||||
case "container.list":
|
case "container.list":
|
||||||
return []string{nativeschema.MethodListContainers}, false, nil
|
return []string{nativeschema.MethodListContainers}, false, nil
|
||||||
case "container.*":
|
case "container.*":
|
||||||
|
@ -265,6 +266,8 @@ func parseAction(lexeme string) ([]string, bool, error) {
|
||||||
nativeschema.MethodPutContainer,
|
nativeschema.MethodPutContainer,
|
||||||
nativeschema.MethodDeleteContainer,
|
nativeschema.MethodDeleteContainer,
|
||||||
nativeschema.MethodGetContainer,
|
nativeschema.MethodGetContainer,
|
||||||
|
nativeschema.MethodSetContainerEACL,
|
||||||
|
nativeschema.MethodGetContainerEACL,
|
||||||
nativeschema.MethodListContainers,
|
nativeschema.MethodListContainers,
|
||||||
}, false, nil
|
}, false, nil
|
||||||
default:
|
default:
|
||||||
|
|
18
cmd/frostfs-cli/modules/util/locode.go
Normal file
18
cmd/frostfs-cli/modules/util/locode.go
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
// locode section.
|
||||||
|
var locodeCmd = &cobra.Command{
|
||||||
|
Use: "locode",
|
||||||
|
Short: "Working with FrostFS UN/LOCODE database",
|
||||||
|
}
|
||||||
|
|
||||||
|
func initLocodeCmd() {
|
||||||
|
locodeCmd.AddCommand(locodeGenerateCmd, locodeInfoCmd)
|
||||||
|
|
||||||
|
initUtilLocodeInfoCmd()
|
||||||
|
initUtilLocodeGenerateCmd()
|
||||||
|
}
|
96
cmd/frostfs-cli/modules/util/locode_generate.go
Normal file
96
cmd/frostfs-cli/modules/util/locode_generate.go
Normal file
|
@ -0,0 +1,96 @@
|
||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||||
|
locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
|
||||||
|
airportsdb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/airports"
|
||||||
|
locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb"
|
||||||
|
continentsdb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/continents/geojson"
|
||||||
|
csvlocode "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/table/csv"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
type namesDB struct {
|
||||||
|
*airportsdb.DB
|
||||||
|
*csvlocode.Table
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
locodeGenerateInputFlag = "in"
|
||||||
|
locodeGenerateSubDivFlag = "subdiv"
|
||||||
|
locodeGenerateAirportsFlag = "airports"
|
||||||
|
locodeGenerateCountriesFlag = "countries"
|
||||||
|
locodeGenerateContinentsFlag = "continents"
|
||||||
|
locodeGenerateOutputFlag = "out"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
locodeGenerateInPaths []string
|
||||||
|
locodeGenerateSubDivPath string
|
||||||
|
locodeGenerateAirportsPath string
|
||||||
|
locodeGenerateCountriesPath string
|
||||||
|
locodeGenerateContinentsPath string
|
||||||
|
locodeGenerateOutPath string
|
||||||
|
|
||||||
|
locodeGenerateCmd = &cobra.Command{
|
||||||
|
Use: "generate",
|
||||||
|
Short: "Generate UN/LOCODE database for FrostFS",
|
||||||
|
Run: func(cmd *cobra.Command, _ []string) {
|
||||||
|
locodeDB := csvlocode.New(
|
||||||
|
csvlocode.Prm{
|
||||||
|
Path: locodeGenerateInPaths[0],
|
||||||
|
SubDivPath: locodeGenerateSubDivPath,
|
||||||
|
},
|
||||||
|
csvlocode.WithExtraPaths(locodeGenerateInPaths[1:]...),
|
||||||
|
)
|
||||||
|
|
||||||
|
airportDB := airportsdb.New(airportsdb.Prm{
|
||||||
|
AirportsPath: locodeGenerateAirportsPath,
|
||||||
|
CountriesPath: locodeGenerateCountriesPath,
|
||||||
|
})
|
||||||
|
|
||||||
|
continentsDB := continentsdb.New(continentsdb.Prm{
|
||||||
|
Path: locodeGenerateContinentsPath,
|
||||||
|
})
|
||||||
|
|
||||||
|
targetDB := locodebolt.New(locodebolt.Prm{
|
||||||
|
Path: locodeGenerateOutPath,
|
||||||
|
})
|
||||||
|
|
||||||
|
err := targetDB.Open()
|
||||||
|
commonCmd.ExitOnErr(cmd, "", err)
|
||||||
|
|
||||||
|
defer targetDB.Close()
|
||||||
|
|
||||||
|
names := &namesDB{
|
||||||
|
DB: airportDB,
|
||||||
|
Table: locodeDB,
|
||||||
|
}
|
||||||
|
|
||||||
|
err = locodedb.FillDatabase(locodeDB, airportDB, continentsDB, names, targetDB)
|
||||||
|
commonCmd.ExitOnErr(cmd, "", err)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func initUtilLocodeGenerateCmd() {
|
||||||
|
flags := locodeGenerateCmd.Flags()
|
||||||
|
|
||||||
|
flags.StringSliceVar(&locodeGenerateInPaths, locodeGenerateInputFlag, nil, "List of paths to UN/LOCODE tables (csv)")
|
||||||
|
_ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateInputFlag)
|
||||||
|
|
||||||
|
flags.StringVar(&locodeGenerateSubDivPath, locodeGenerateSubDivFlag, "", "Path to UN/LOCODE subdivision database (csv)")
|
||||||
|
_ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateSubDivFlag)
|
||||||
|
|
||||||
|
flags.StringVar(&locodeGenerateAirportsPath, locodeGenerateAirportsFlag, "", "Path to OpenFlights airport database (csv)")
|
||||||
|
_ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateAirportsFlag)
|
||||||
|
|
||||||
|
flags.StringVar(&locodeGenerateCountriesPath, locodeGenerateCountriesFlag, "", "Path to OpenFlights country database (csv)")
|
||||||
|
_ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateCountriesFlag)
|
||||||
|
|
||||||
|
flags.StringVar(&locodeGenerateContinentsPath, locodeGenerateContinentsFlag, "", "Path to continent polygons (GeoJSON)")
|
||||||
|
_ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateContinentsFlag)
|
||||||
|
|
||||||
|
flags.StringVar(&locodeGenerateOutPath, locodeGenerateOutputFlag, "", "Target path for generated database")
|
||||||
|
_ = locodeGenerateCmd.MarkFlagRequired(locodeGenerateOutputFlag)
|
||||||
|
}
|
56
cmd/frostfs-cli/modules/util/locode_info.go
Normal file
56
cmd/frostfs-cli/modules/util/locode_info.go
Normal file
|
@ -0,0 +1,56 @@
|
||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||||
|
locodedb "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db"
|
||||||
|
locodebolt "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode/db/boltdb"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
locodeInfoDBFlag = "db"
|
||||||
|
locodeInfoCodeFlag = "locode"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
locodeInfoDBPath string
|
||||||
|
locodeInfoCode string
|
||||||
|
|
||||||
|
locodeInfoCmd = &cobra.Command{
|
||||||
|
Use: "info",
|
||||||
|
Short: "Print information about UN/LOCODE from FrostFS database",
|
||||||
|
Run: func(cmd *cobra.Command, _ []string) {
|
||||||
|
targetDB := locodebolt.New(locodebolt.Prm{
|
||||||
|
Path: locodeInfoDBPath,
|
||||||
|
}, locodebolt.ReadOnly())
|
||||||
|
|
||||||
|
err := targetDB.Open()
|
||||||
|
commonCmd.ExitOnErr(cmd, "", err)
|
||||||
|
|
||||||
|
defer targetDB.Close()
|
||||||
|
|
||||||
|
record, err := locodedb.LocodeRecord(targetDB, locodeInfoCode)
|
||||||
|
commonCmd.ExitOnErr(cmd, "", err)
|
||||||
|
|
||||||
|
cmd.Printf("Country: %s\n", record.CountryName())
|
||||||
|
cmd.Printf("Location: %s\n", record.LocationName())
|
||||||
|
cmd.Printf("Continent: %s\n", record.Continent())
|
||||||
|
if subDivCode := record.SubDivCode(); subDivCode != "" {
|
||||||
|
cmd.Printf("Subdivision: [%s] %s\n", subDivCode, record.SubDivName())
|
||||||
|
}
|
||||||
|
|
||||||
|
geoPoint := record.GeoPoint()
|
||||||
|
cmd.Printf("Coordinates: %0.2f, %0.2f\n", geoPoint.Latitude(), geoPoint.Longitude())
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func initUtilLocodeInfoCmd() {
|
||||||
|
flags := locodeInfoCmd.Flags()
|
||||||
|
|
||||||
|
flags.StringVar(&locodeInfoDBPath, locodeInfoDBFlag, "", "Path to FrostFS UN/LOCODE database")
|
||||||
|
_ = locodeInfoCmd.MarkFlagRequired(locodeInfoDBFlag)
|
||||||
|
|
||||||
|
flags.StringVar(&locodeInfoCode, locodeInfoCodeFlag, "", "UN/LOCODE")
|
||||||
|
_ = locodeInfoCmd.MarkFlagRequired(locodeInfoCodeFlag)
|
||||||
|
}
|
|
@ -23,9 +23,11 @@ func init() {
|
||||||
signCmd,
|
signCmd,
|
||||||
convertCmd,
|
convertCmd,
|
||||||
keyerCmd,
|
keyerCmd,
|
||||||
|
locodeCmd,
|
||||||
)
|
)
|
||||||
|
|
||||||
initSignCmd()
|
initSignCmd()
|
||||||
initConvertCmd()
|
initConvertCmd()
|
||||||
initKeyerCmd()
|
initKeyerCmd()
|
||||||
|
initLocodeCmd()
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,7 +7,6 @@ import (
|
||||||
|
|
||||||
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
|
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||||
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
|
|
||||||
"github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -41,8 +40,6 @@ func reloadConfig() error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
|
|
||||||
|
|
||||||
return logPrm.Reload()
|
return logPrm.Reload()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -84,10 +81,6 @@ func watchForSignal(cancel func()) {
|
||||||
return
|
return
|
||||||
case <-sighupCh:
|
case <-sighupCh:
|
||||||
log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
|
log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
|
||||||
if !innerRing.CompareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
|
|
||||||
log.Info(logs.FrostFSNodeSIGHUPSkip)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
err := reloadConfig()
|
err := reloadConfig()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
|
log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
|
||||||
|
@ -99,7 +92,6 @@ func watchForSignal(cancel func()) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
|
log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
|
||||||
}
|
}
|
||||||
innerRing.CompareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
|
|
||||||
log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
|
log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,7 +9,6 @@ import (
|
||||||
func defaultConfiguration(cfg *viper.Viper) {
|
func defaultConfiguration(cfg *viper.Viper) {
|
||||||
cfg.SetDefault("logger.level", "info")
|
cfg.SetDefault("logger.level", "info")
|
||||||
cfg.SetDefault("logger.destination", "stdout")
|
cfg.SetDefault("logger.destination", "stdout")
|
||||||
cfg.SetDefault("logger.timestamp", false)
|
|
||||||
|
|
||||||
setPprofDefaults(cfg)
|
setPprofDefaults(cfg)
|
||||||
|
|
||||||
|
|
|
@ -13,7 +13,6 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
|
|
||||||
"github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -79,8 +78,6 @@ func main() {
|
||||||
)
|
)
|
||||||
exitErr(err)
|
exitErr(err)
|
||||||
logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()
|
logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()
|
||||||
logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
|
|
||||||
|
|
||||||
log, err = logger.NewLogger(logPrm)
|
log, err = logger.NewLogger(logPrm)
|
||||||
exitErr(err)
|
exitErr(err)
|
||||||
|
|
||||||
|
@ -127,8 +124,4 @@ func shutdown() {
|
||||||
zap.String("error", err.Error()),
|
zap.String("error", err.Error()),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := sdnotify.ClearStatus(); err != nil {
|
|
||||||
log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -19,7 +19,7 @@ var Root = &cobra.Command{
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
Root.AddCommand(listCMD, inspectCMD, tuiCMD)
|
Root.AddCommand(listCMD, inspectCMD)
|
||||||
}
|
}
|
||||||
|
|
||||||
func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
|
func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
|
||||||
|
|
|
@ -1,79 +0,0 @@
|
||||||
package blobovnicza
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
|
|
||||||
schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/blobovnicza"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"go.etcd.io/bbolt"
|
|
||||||
)
|
|
||||||
|
|
||||||
var tuiCMD = &cobra.Command{
|
|
||||||
Use: "explore",
|
|
||||||
Short: "Blobovnicza exploration with a terminal UI",
|
|
||||||
Long: `Launch a terminal UI to explore blobovnicza and search for data.
|
|
||||||
|
|
||||||
Available search filters:
|
|
||||||
- cid CID
|
|
||||||
- oid OID
|
|
||||||
- addr CID/OID
|
|
||||||
`,
|
|
||||||
Run: tuiFunc,
|
|
||||||
}
|
|
||||||
|
|
||||||
var initialPrompt string
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
common.AddComponentPathFlag(tuiCMD, &vPath)
|
|
||||||
|
|
||||||
tuiCMD.Flags().StringVar(
|
|
||||||
&initialPrompt,
|
|
||||||
"filter",
|
|
||||||
"",
|
|
||||||
"Filter prompt to start with, format 'tag:value [+ tag:value]...'",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func tuiFunc(cmd *cobra.Command, _ []string) {
|
|
||||||
common.ExitOnErr(cmd, runTUI(cmd))
|
|
||||||
}
|
|
||||||
|
|
||||||
func runTUI(cmd *cobra.Command) error {
|
|
||||||
db, err := openDB(false)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("couldn't open database: %w", err)
|
|
||||||
}
|
|
||||||
defer db.Close()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(cmd.Context())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
app := tview.NewApplication()
|
|
||||||
ui := tui.NewUI(ctx, app, db, schema.BlobovniczaParser, nil)
|
|
||||||
|
|
||||||
_ = ui.AddFilter("cid", tui.CIDParser, "CID")
|
|
||||||
_ = ui.AddFilter("oid", tui.OIDParser, "OID")
|
|
||||||
_ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID")
|
|
||||||
|
|
||||||
err = ui.WithPrompt(initialPrompt)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid filter prompt: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
app.SetRoot(ui, true).SetFocus(ui)
|
|
||||||
return app.Run()
|
|
||||||
}
|
|
||||||
|
|
||||||
func openDB(writable bool) (*bbolt.DB, error) {
|
|
||||||
db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
|
|
||||||
ReadOnly: !writable,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return db, nil
|
|
||||||
}
|
|
|
@ -32,7 +32,6 @@ func init() {
|
||||||
inspectCMD,
|
inspectCMD,
|
||||||
listGraveyardCMD,
|
listGraveyardCMD,
|
||||||
listGarbageCMD,
|
listGarbageCMD,
|
||||||
tuiCMD,
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,82 +0,0 @@
|
||||||
package meta
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
|
|
||||||
schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"go.etcd.io/bbolt"
|
|
||||||
)
|
|
||||||
|
|
||||||
var tuiCMD = &cobra.Command{
|
|
||||||
Use: "explore",
|
|
||||||
Short: "Metabase exploration with a terminal UI",
|
|
||||||
Long: `Launch a terminal UI to explore metabase and search for data.
|
|
||||||
|
|
||||||
Available search filters:
|
|
||||||
- cid CID
|
|
||||||
- oid OID
|
|
||||||
- addr CID/OID
|
|
||||||
- attr key[/value]
|
|
||||||
`,
|
|
||||||
Run: tuiFunc,
|
|
||||||
}
|
|
||||||
|
|
||||||
var initialPrompt string
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
common.AddComponentPathFlag(tuiCMD, &vPath)
|
|
||||||
|
|
||||||
tuiCMD.Flags().StringVar(
|
|
||||||
&initialPrompt,
|
|
||||||
"filter",
|
|
||||||
"",
|
|
||||||
"Filter prompt to start with, format 'tag:value [+ tag:value]...'",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func tuiFunc(cmd *cobra.Command, _ []string) {
|
|
||||||
common.ExitOnErr(cmd, runTUI(cmd))
|
|
||||||
}
|
|
||||||
|
|
||||||
func runTUI(cmd *cobra.Command) error {
|
|
||||||
db, err := openDB(false)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("couldn't open database: %w", err)
|
|
||||||
}
|
|
||||||
defer db.Close()
|
|
||||||
|
|
||||||
// Need if app was stopped with Ctrl-C.
|
|
||||||
ctx, cancel := context.WithCancel(cmd.Context())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
app := tview.NewApplication()
|
|
||||||
ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
|
|
||||||
|
|
||||||
_ = ui.AddFilter("cid", tui.CIDParser, "CID")
|
|
||||||
_ = ui.AddFilter("oid", tui.OIDParser, "OID")
|
|
||||||
_ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID")
|
|
||||||
_ = ui.AddCompositeFilter("attr", tui.AttributeParser, "key[/value]")
|
|
||||||
|
|
||||||
err = ui.WithPrompt(initialPrompt)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid filter prompt: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
app.SetRoot(ui, true).SetFocus(ui)
|
|
||||||
return app.Run()
|
|
||||||
}
|
|
||||||
|
|
||||||
func openDB(writable bool) (*bbolt.DB, error) {
|
|
||||||
db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
|
|
||||||
ReadOnly: !writable,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return db, nil
|
|
||||||
}
|
|
|
@ -1,96 +0,0 @@
|
||||||
package blobovnicza
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/mr-tron/base58"
|
|
||||||
)
|
|
||||||
|
|
||||||
var BlobovniczaParser = common.WithFallback(
|
|
||||||
common.Any(
|
|
||||||
MetaBucketParser,
|
|
||||||
BucketParser,
|
|
||||||
),
|
|
||||||
common.RawParser.ToFallbackParser(),
|
|
||||||
)
|
|
||||||
|
|
||||||
func MetaBucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if value != nil {
|
|
||||||
return nil, nil, errors.New("not a bucket")
|
|
||||||
}
|
|
||||||
|
|
||||||
if string(key) != "META" {
|
|
||||||
return nil, nil, errors.New("invalid bucket name")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &MetaBucket{}, MetaRecordParser, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func MetaRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
var r MetaRecord
|
|
||||||
|
|
||||||
if len(key) == 0 {
|
|
||||||
return nil, nil, errors.New("invalid key")
|
|
||||||
}
|
|
||||||
|
|
||||||
r.label = string(key)
|
|
||||||
r.count = binary.LittleEndian.Uint64(value)
|
|
||||||
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func BucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if value != nil {
|
|
||||||
return nil, nil, errors.New("not a bucket")
|
|
||||||
}
|
|
||||||
|
|
||||||
size, n := binary.Varint(key)
|
|
||||||
if n <= 0 {
|
|
||||||
return nil, nil, errors.New("invalid size")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Bucket{size: size}, RecordParser, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func RecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
parts := strings.Split(string(key), "/")
|
|
||||||
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return nil, nil, errors.New("invalid key, expected address string <CID>/<OID>")
|
|
||||||
}
|
|
||||||
|
|
||||||
cnrRaw, err := base58.Decode(parts[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, errors.New("can't decode CID string")
|
|
||||||
}
|
|
||||||
objRaw, err := base58.Decode(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, errors.New("can't decode OID string")
|
|
||||||
}
|
|
||||||
|
|
||||||
cnr := cid.ID{}
|
|
||||||
if err := cnr.Decode(cnrRaw); err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("can't decode CID: %w", err)
|
|
||||||
}
|
|
||||||
obj := oid.ID{}
|
|
||||||
if err := obj.Decode(objRaw); err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("can't decode OID: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var r Record
|
|
||||||
|
|
||||||
r.addr.SetContainer(cnr)
|
|
||||||
r.addr.SetObject(obj)
|
|
||||||
|
|
||||||
if err := r.object.Unmarshal(value); err != nil {
|
|
||||||
return nil, nil, errors.New("can't unmarshal object")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
|
@ -1,101 +0,0 @@
|
||||||
package blobovnicza
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
MetaBucket struct{}
|
|
||||||
|
|
||||||
MetaRecord struct {
|
|
||||||
label string
|
|
||||||
count uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
Bucket struct {
|
|
||||||
size int64
|
|
||||||
}
|
|
||||||
|
|
||||||
Record struct {
|
|
||||||
addr oid.Address
|
|
||||||
object objectSDK.Object
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func (b *MetaBucket) String() string {
|
|
||||||
return common.FormatSimple("META", tcell.ColorLime)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *MetaBucket) DetailedString() string {
|
|
||||||
return spew.Sdump(*b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *MetaBucket) Filter(string, any) common.FilterResult {
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *MetaRecord) String() string {
|
|
||||||
return fmt.Sprintf("%-11s %c %d", r.label, tview.Borders.Vertical, r.count)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *MetaRecord) DetailedString() string {
|
|
||||||
return spew.Sdump(*r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *MetaRecord) Filter(string, any) common.FilterResult {
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Bucket) String() string {
|
|
||||||
return common.FormatSimple(strconv.FormatInt(b.size, 10), tcell.ColorLime)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Bucket) DetailedString() string {
|
|
||||||
return spew.Sdump(*b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Bucket) Filter(typ string, _ any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
return common.Maybe
|
|
||||||
case "oid":
|
|
||||||
return common.Maybe
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Record) String() string {
|
|
||||||
return fmt.Sprintf(
|
|
||||||
"CID %s OID %s %c Object {...}",
|
|
||||||
common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua),
|
|
||||||
common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua),
|
|
||||||
tview.Borders.Vertical,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Record) DetailedString() string {
|
|
||||||
return spew.Sdump(*r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Record) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
id := val.(cid.ID)
|
|
||||||
return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,43 +0,0 @@
|
||||||
package common
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
type FormatOptions struct {
|
|
||||||
Color tcell.Color
|
|
||||||
|
|
||||||
Bold,
|
|
||||||
Italic,
|
|
||||||
Underline,
|
|
||||||
StrikeThrough bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func Format(s string, opts FormatOptions) string {
|
|
||||||
var boldTag, italicTag, underlineTag, strikeThroughTag string
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case opts.Bold:
|
|
||||||
boldTag = "b"
|
|
||||||
case opts.Italic:
|
|
||||||
italicTag = "i"
|
|
||||||
case opts.Underline:
|
|
||||||
underlineTag = "u"
|
|
||||||
case opts.StrikeThrough:
|
|
||||||
strikeThroughTag = "s"
|
|
||||||
}
|
|
||||||
|
|
||||||
attrs := fmt.Sprintf(
|
|
||||||
"%s%s%s%s", boldTag, italicTag, underlineTag, strikeThroughTag,
|
|
||||||
)
|
|
||||||
color := strconv.FormatInt(int64(opts.Color.Hex()), 16)
|
|
||||||
|
|
||||||
return fmt.Sprintf("[#%06s::%s]%s[-::-]", color, attrs, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func FormatSimple(s string, c tcell.Color) string {
|
|
||||||
return Format(s, FormatOptions{Color: c})
|
|
||||||
}
|
|
|
@ -1,29 +0,0 @@
|
||||||
package common
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
"github.com/mr-tron/base58"
|
|
||||||
)
|
|
||||||
|
|
||||||
type RawEntry struct {
|
|
||||||
key, value []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
var RawParser Parser = rawParser
|
|
||||||
|
|
||||||
func rawParser(key, value []byte) (SchemaEntry, Parser, error) {
|
|
||||||
return &RawEntry{key: key, value: value}, rawParser, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RawEntry) String() string {
|
|
||||||
return FormatSimple(base58.Encode(r.key), tcell.ColorRed)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RawEntry) DetailedString() string {
|
|
||||||
return spew.Sdump(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RawEntry) Filter(string, any) FilterResult {
|
|
||||||
return No
|
|
||||||
}
|
|
|
@ -1,81 +0,0 @@
|
||||||
package common
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
type FilterResult byte
|
|
||||||
|
|
||||||
const (
|
|
||||||
No FilterResult = iota
|
|
||||||
Maybe
|
|
||||||
Yes
|
|
||||||
)
|
|
||||||
|
|
||||||
func IfThenElse(condition bool, onSuccess, onFailure FilterResult) FilterResult {
|
|
||||||
var res FilterResult
|
|
||||||
if condition {
|
|
||||||
res = onSuccess
|
|
||||||
} else {
|
|
||||||
res = onFailure
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
type SchemaEntry interface {
|
|
||||||
String() string
|
|
||||||
DetailedString() string
|
|
||||||
Filter(typ string, val any) FilterResult
|
|
||||||
}
|
|
||||||
|
|
||||||
type (
|
|
||||||
Parser func(key, value []byte) (SchemaEntry, Parser, error)
|
|
||||||
FallbackParser func(key, value []byte) (SchemaEntry, Parser)
|
|
||||||
)
|
|
||||||
|
|
||||||
func Any(parsers ...Parser) Parser {
|
|
||||||
return func(key, value []byte) (SchemaEntry, Parser, error) {
|
|
||||||
var errs error
|
|
||||||
for _, parser := range parsers {
|
|
||||||
ret, next, err := parser(key, value)
|
|
||||||
if err == nil {
|
|
||||||
return ret, next, nil
|
|
||||||
}
|
|
||||||
errs = errors.Join(errs, err)
|
|
||||||
}
|
|
||||||
return nil, nil, fmt.Errorf("no parser succeeded: %w", errs)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func WithFallback(parser Parser, fallback FallbackParser) Parser {
|
|
||||||
if parser == nil {
|
|
||||||
return fallback.ToParser()
|
|
||||||
}
|
|
||||||
return func(key, value []byte) (SchemaEntry, Parser, error) {
|
|
||||||
entry, next, err := parser(key, value)
|
|
||||||
if err == nil {
|
|
||||||
return entry, WithFallback(next, fallback), nil
|
|
||||||
}
|
|
||||||
return fallback.ToParser()(key, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fp FallbackParser) ToParser() Parser {
|
|
||||||
return func(key, value []byte) (SchemaEntry, Parser, error) {
|
|
||||||
entry, next := fp(key, value)
|
|
||||||
return entry, next, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p Parser) ToFallbackParser() FallbackParser {
|
|
||||||
return func(key, value []byte) (SchemaEntry, Parser) {
|
|
||||||
entry, next, err := p(key, value)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Errorf(
|
|
||||||
"couldn't use that parser as a fallback parser, it returned an error: %w", err,
|
|
||||||
))
|
|
||||||
}
|
|
||||||
return entry, next
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,29 +0,0 @@
|
||||||
package buckets
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (b *PrefixBucket) DetailedString() string {
|
|
||||||
return spew.Sdump(*b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *PrefixContainerBucket) DetailedString() string {
|
|
||||||
return spew.Sdump(*b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *UserBucket) DetailedString() string {
|
|
||||||
return spew.Sdump(*b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *ContainerBucket) DetailedString() string {
|
|
||||||
return spew.Sdump(*b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *UserAttributeKeyBucket) DetailedString() string {
|
|
||||||
return spew.Sdump(*b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *UserAttributeValueBucket) DetailedString() string {
|
|
||||||
return spew.Sdump(*b)
|
|
||||||
}
|
|
|
@ -1,81 +0,0 @@
|
||||||
package buckets
|
|
||||||
|
|
||||||
import (
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (b *PrefixBucket) Filter(typ string, _ any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
return b.resolvers.cidResolver(false)
|
|
||||||
case "oid":
|
|
||||||
return b.resolvers.oidResolver(false)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *PrefixContainerBucket) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
id := val.(cid.ID)
|
|
||||||
return b.resolvers.cidResolver(b.id.Equals(id))
|
|
||||||
case "oid":
|
|
||||||
return b.resolvers.oidResolver(false)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *UserBucket) Filter(typ string, _ any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
return b.resolvers.cidResolver(false)
|
|
||||||
case "oid":
|
|
||||||
return b.resolvers.oidResolver(false)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *ContainerBucket) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
id := val.(cid.ID)
|
|
||||||
return b.resolvers.cidResolver(b.id.Equals(id))
|
|
||||||
case "oid":
|
|
||||||
return b.resolvers.oidResolver(false)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *UserAttributeKeyBucket) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
id := val.(cid.ID)
|
|
||||||
return common.IfThenElse(b.id.Equals(id), common.Yes, common.No)
|
|
||||||
case "oid":
|
|
||||||
return common.Maybe
|
|
||||||
case "key":
|
|
||||||
key := val.(string)
|
|
||||||
return common.IfThenElse(b.key == key, common.Yes, common.No)
|
|
||||||
case "value":
|
|
||||||
return common.Maybe
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *UserAttributeValueBucket) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "oid":
|
|
||||||
return common.Maybe
|
|
||||||
case "value":
|
|
||||||
value := val.(string)
|
|
||||||
return common.IfThenElse(b.value == value, common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,111 +0,0 @@
|
||||||
package buckets
|
|
||||||
|
|
||||||
import (
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/records"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Parsers for every top-level metabase bucket, one per Prefix value.
//
// NOTE(review): the resolver choice (Strict vs Lenient) appears to encode
// whether a CID/OID filter can be decided at the corresponding bucket
// level — confirm against the Filter implementations before relying on it.
var (
	GraveyardParser = NewPrefixBucketParser(Graveyard, records.GraveyardRecordParser, Resolvers{
		cidResolver: LenientResolver,
		oidResolver: LenientResolver,
	})

	GarbageParser = NewPrefixBucketParser(Garbage, records.GarbageRecordParser, Resolvers{
		cidResolver: LenientResolver,
		oidResolver: LenientResolver,
	})

	ContainerVolumeParser = NewPrefixBucketParser(ContainerVolume, records.ContainerVolumeRecordParser, Resolvers{
		cidResolver: LenientResolver,
		oidResolver: StrictResolver,
	})

	// Locked nests a container level: prefix bucket -> CID bucket -> locked records.
	LockedParser = NewPrefixBucketParser(
		Locked,
		NewContainerBucketParser(
			records.LockedRecordParser,
			Resolvers{
				cidResolver: StrictResolver,
				oidResolver: LenientResolver,
			},
		),
		Resolvers{
			cidResolver: LenientResolver,
			oidResolver: LenientResolver,
		},
	)

	ShardInfoParser = NewPrefixBucketParser(ShardInfo, records.ShardInfoRecordParser, Resolvers{
		cidResolver: StrictResolver,
		oidResolver: StrictResolver,
	})

	PrimaryParser = NewPrefixContainerBucketParser(Primary, records.ObjectRecordParser, Resolvers{
		cidResolver: StrictResolver,
		oidResolver: LenientResolver,
	})

	LockersParser = NewPrefixContainerBucketParser(Lockers, records.ObjectRecordParser, Resolvers{
		cidResolver: StrictResolver,
		oidResolver: LenientResolver,
	})

	TombstoneParser = NewPrefixContainerBucketParser(Tombstone, records.ObjectRecordParser, Resolvers{
		cidResolver: StrictResolver,
		oidResolver: LenientResolver,
	})

	SmallParser = NewPrefixContainerBucketParser(Small, records.SmallRecordParser, Resolvers{
		cidResolver: StrictResolver,
		oidResolver: LenientResolver,
	})

	RootParser = NewPrefixContainerBucketParser(Root, records.RootRecordParser, Resolvers{
		cidResolver: StrictResolver,
		oidResolver: LenientResolver,
	})

	// Owner nests a user level: prefix+CID bucket -> owner (UID) bucket -> records.
	OwnerParser = NewPrefixContainerBucketParser(
		Owner,
		NewUserBucketParser(
			records.OwnerRecordParser,
			Resolvers{
				cidResolver: StrictResolver,
				oidResolver: LenientResolver,
			},
		),
		Resolvers{
			cidResolver: StrictResolver,
			oidResolver: LenientResolver,
		},
	)

	// UserAttribute is a two-level chain: attribute-key bucket -> attribute-value bucket.
	UserAttributeParser = NewUserAttributeKeyBucketParser(
		NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
	)

	PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
		cidResolver: StrictResolver,
		oidResolver: StrictResolver,
	})

	ParentParser = NewPrefixContainerBucketParser(Parent, records.ParentRecordParser, Resolvers{
		cidResolver: StrictResolver,
		oidResolver: LenientResolver,
	})

	SplitParser = NewPrefixContainerBucketParser(Split, records.SplitRecordParser, Resolvers{
		cidResolver: StrictResolver,
		oidResolver: StrictResolver,
	})

	ContainerCountersParser = NewPrefixBucketParser(ContainerCounters, records.ContainerCountersRecordParser, Resolvers{
		cidResolver: LenientResolver,
		oidResolver: StrictResolver,
	})

	ECInfoParser = NewPrefixContainerBucketParser(ECInfo, records.ECInfoRecordParser, Resolvers{
		cidResolver: StrictResolver,
		oidResolver: LenientResolver,
	})
)
|
|
|
@ -1,53 +0,0 @@
|
||||||
package buckets
|
|
||||||
|
|
||||||
// Prefix is the one-byte discriminator that opens every top-level
// metabase bucket key.
type Prefix byte

// Known bucket prefixes. The iota order mirrors the on-disk numbering and
// must not be changed; the blank identifier preserves a retired slot.
const (
	Graveyard Prefix = iota
	Garbage
	ToMoveIt
	ContainerVolume
	Locked
	ShardInfo
	Primary
	Lockers
	_
	Tombstone
	Small
	Root
	Owner
	UserAttribute
	PayloadHash
	Parent
	Split
	ContainerCounters
	ECInfo
)

// x maps each known prefix to its human-readable label.
var x = map[Prefix]string{
	Graveyard:         "Graveyard",
	Garbage:           "Garbage",
	ToMoveIt:          "To Move It",
	ContainerVolume:   "Container Volume",
	Locked:            "Locked",
	ShardInfo:         "Shard Info",
	Primary:           "Primary",
	Lockers:           "Lockers",
	Tombstone:         "Tombstone",
	Small:             "Small",
	Root:              "Root",
	Owner:             "Owner",
	UserAttribute:     "User Attribute",
	PayloadHash:       "Payload Hash",
	Parent:            "Parent",
	Split:             "Split",
	ContainerCounters: "Container Counters",
	ECInfo:            "EC Info",
}

// String implements fmt.Stringer, returning a fixed fallback label for
// any value outside the known prefix set.
func (p Prefix) String() string {
	name, known := x[p]
	if !known {
		return "Unknown Prefix"
	}
	return name
}
|
|
|
@ -1,48 +0,0 @@
|
||||||
package buckets
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (b *PrefixBucket) String() string {
|
|
||||||
return common.FormatSimple(
|
|
||||||
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *PrefixContainerBucket) String() string {
|
|
||||||
return fmt.Sprintf(
|
|
||||||
"%s CID %s",
|
|
||||||
common.FormatSimple(
|
|
||||||
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
|
|
||||||
),
|
|
||||||
common.FormatSimple(b.id.String(), tcell.ColorAqua),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *UserBucket) String() string {
|
|
||||||
return "UID " + common.FormatSimple(b.id.String(), tcell.ColorAqua)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *ContainerBucket) String() string {
|
|
||||||
return "CID " + common.FormatSimple(b.id.String(), tcell.ColorAqua)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *UserAttributeKeyBucket) String() string {
|
|
||||||
return fmt.Sprintf("%s CID %s ATTR-KEY %s",
|
|
||||||
common.FormatSimple(
|
|
||||||
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
|
|
||||||
),
|
|
||||||
common.FormatSimple(
|
|
||||||
fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,
|
|
||||||
),
|
|
||||||
common.FormatSimple(b.key, tcell.ColorAqua),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *UserAttributeValueBucket) String() string {
|
|
||||||
return "ATTR-VALUE " + common.FormatSimple(b.value, tcell.ColorAqua)
|
|
||||||
}
|
|
|
@ -1,166 +0,0 @@
|
||||||
package buckets
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
|
||||||
"github.com/mr-tron/base58"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
	// PrefixBucket is a top-level bucket keyed by a single prefix byte.
	PrefixBucket struct {
		prefix    Prefix
		resolvers Resolvers
	}

	// PrefixContainerBucket is a top-level bucket keyed by a prefix byte
	// followed by a 32-byte container ID.
	PrefixContainerBucket struct {
		prefix    Prefix
		id        cid.ID
		resolvers Resolvers
	}

	// ContainerBucket is a nested bucket keyed by a bare 32-byte container ID.
	ContainerBucket struct {
		id        cid.ID
		resolvers Resolvers
	}

	// UserBucket is a nested bucket keyed by a user (owner) ID.
	UserBucket struct {
		id   user.ID
		resolvers Resolvers
	}

	// UserAttributeKeyBucket is keyed by prefix, container ID and attribute
	// key; see NewUserAttributeKeyBucketParser for the exact byte layout.
	UserAttributeKeyBucket struct {
		prefix Prefix
		id     cid.ID
		key    string
	}

	// UserAttributeValueBucket is keyed by a raw attribute value string.
	UserAttributeValueBucket struct {
		value string
	}
)

type (
	// FilterResolver maps a concrete match/no-match outcome onto a
	// FilterResult (Yes/No/Maybe) according to the bucket's semantics.
	FilterResolver = func(result bool) common.FilterResult

	// Resolvers bundles the CID and OID resolution strategies of a bucket.
	Resolvers struct {
		cidResolver FilterResolver
		oidResolver FilterResolver
	}
)

var (
	// StrictResolver answers definitively: match -> Yes, no match -> No.
	StrictResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.No) }

	// LenientResolver never rules an entry out: match -> Yes, otherwise Maybe.
	LenientResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.Maybe) }
)

// Sanity-check errors shared by the bucket parsers in this package.
var (
	ErrNotBucket          = errors.New("not a bucket")
	ErrInvalidKeyLength   = errors.New("invalid key length")
	ErrInvalidValueLength = errors.New("invalid value length")
	ErrInvalidPrefix      = errors.New("invalid prefix")
)
|
|
||||||
|
|
||||||
func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
|
|
||||||
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if value != nil {
|
|
||||||
return nil, nil, ErrNotBucket
|
|
||||||
}
|
|
||||||
if len(key) != 1 {
|
|
||||||
return nil, nil, ErrInvalidKeyLength
|
|
||||||
}
|
|
||||||
var b PrefixBucket
|
|
||||||
if b.prefix = Prefix(key[0]); b.prefix != prefix {
|
|
||||||
return nil, nil, ErrInvalidPrefix
|
|
||||||
}
|
|
||||||
b.resolvers = resolvers
|
|
||||||
return &b, next, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewPrefixContainerBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
|
|
||||||
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if value != nil {
|
|
||||||
return nil, nil, ErrNotBucket
|
|
||||||
}
|
|
||||||
if len(key) != 33 {
|
|
||||||
return nil, nil, ErrInvalidKeyLength
|
|
||||||
}
|
|
||||||
var b PrefixContainerBucket
|
|
||||||
if b.prefix = Prefix(key[0]); b.prefix != prefix {
|
|
||||||
return nil, nil, ErrInvalidPrefix
|
|
||||||
}
|
|
||||||
if err := b.id.Decode(key[1:]); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
b.resolvers = resolvers
|
|
||||||
return &b, next, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewUserBucketParser(next common.Parser, resolvers Resolvers) common.Parser {
|
|
||||||
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if value != nil {
|
|
||||||
return nil, nil, ErrNotBucket
|
|
||||||
}
|
|
||||||
var b UserBucket
|
|
||||||
if err := b.id.DecodeString(base58.Encode(key)); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
b.resolvers = resolvers
|
|
||||||
return &b, next, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Parser {
|
|
||||||
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if value != nil {
|
|
||||||
return nil, nil, ErrNotBucket
|
|
||||||
}
|
|
||||||
if len(key) != 32 {
|
|
||||||
return nil, nil, ErrInvalidKeyLength
|
|
||||||
}
|
|
||||||
var b ContainerBucket
|
|
||||||
if err := b.id.Decode(key); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
b.resolvers = resolvers
|
|
||||||
return &b, next, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
|
|
||||||
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if value != nil {
|
|
||||||
return nil, nil, ErrNotBucket
|
|
||||||
}
|
|
||||||
if len(key) < 34 {
|
|
||||||
return nil, nil, ErrInvalidKeyLength
|
|
||||||
}
|
|
||||||
var b UserAttributeKeyBucket
|
|
||||||
if b.prefix = Prefix(key[0]); b.prefix != UserAttribute {
|
|
||||||
return nil, nil, ErrInvalidPrefix
|
|
||||||
}
|
|
||||||
if err := b.id.Decode(key[1:33]); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
b.key = string(key[33:])
|
|
||||||
return &b, next, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewUserAttributeValueBucketParser(next common.Parser) common.Parser {
|
|
||||||
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if value != nil {
|
|
||||||
return nil, nil, ErrNotBucket
|
|
||||||
}
|
|
||||||
if len(key) == 0 {
|
|
||||||
return nil, nil, ErrInvalidKeyLength
|
|
||||||
}
|
|
||||||
var b UserAttributeValueBucket
|
|
||||||
b.value = string(key)
|
|
||||||
return &b, next, nil
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,29 +0,0 @@
|
||||||
package metabase
|
|
||||||
|
|
||||||
import (
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MetabaseParser parses the top level of the metabase schema: it tries
// every known bucket parser in turn and, when none of them accepts the
// entry, falls back to the generic raw parser.
var MetabaseParser = common.WithFallback(
	common.Any(
		buckets.GraveyardParser,
		buckets.GarbageParser,
		buckets.ContainerVolumeParser,
		buckets.LockedParser,
		buckets.ShardInfoParser,
		buckets.PrimaryParser,
		buckets.LockersParser,
		buckets.TombstoneParser,
		buckets.SmallParser,
		buckets.RootParser,
		buckets.OwnerParser,
		buckets.UserAttributeParser,
		buckets.PayloadHashParser,
		buckets.ParentParser,
		buckets.SplitParser,
		buckets.ContainerCountersParser,
		buckets.ECInfoParser,
	),
	common.RawParser.ToFallbackParser(),
)
|
|
|
@ -1,65 +0,0 @@
|
||||||
package records
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DetailedString returns a full field-by-field dump of the record.
func (r *GraveyardRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *GarbageRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *ContainerVolumeRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *LockedRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *ShardInfoRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *ObjectRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *SmallRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *RootRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *OwnerRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *UserAttributeRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *PayloadHashRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *ParentRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *SplitRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *ContainerCountersRecord) DetailedString() string {
	return spew.Sdump(*r)
}

// DetailedString returns a full field-by-field dump of the record.
func (r *ECInfoRecord) DetailedString() string {
	return spew.Sdump(*r)
}
|
|
|
@ -1,145 +0,0 @@
|
||||||
package records
|
|
||||||
|
|
||||||
import (
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (r *GraveyardRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
id := val.(cid.ID)
|
|
||||||
return common.IfThenElse(r.object.Container().Equals(id), common.Yes, common.No)
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.object.Object().Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *GarbageRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
id := val.(cid.ID)
|
|
||||||
return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ContainerVolumeRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
id := val.(cid.ID)
|
|
||||||
return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ShardInfoRecord) Filter(string, any) common.FilterResult {
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *LockedRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ObjectRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SmallRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RootRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *OwnerRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *UserAttributeRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PayloadHashRecord) Filter(string, any) common.FilterResult {
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ParentRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.parent.Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SplitRecord) Filter(string, any) common.FilterResult {
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ContainerCountersRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
id := val.(cid.ID)
|
|
||||||
return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,251 +0,0 @@
|
||||||
package records
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Validation errors shared by the record parsers in this package.
var (
	ErrInvalidKeyLength   = errors.New("invalid key length")
	ErrInvalidValueLength = errors.New("invalid value length")
	ErrInvalidPrefix      = errors.New("invalid prefix")
)
|
|
||||||
|
|
||||||
func GraveyardRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if len(key) != 64 {
|
|
||||||
return nil, nil, ErrInvalidKeyLength
|
|
||||||
}
|
|
||||||
if len(value) != 64 {
|
|
||||||
return nil, nil, ErrInvalidValueLength
|
|
||||||
}
|
|
||||||
var (
|
|
||||||
cnr cid.ID
|
|
||||||
obj oid.ID
|
|
||||||
r GraveyardRecord
|
|
||||||
)
|
|
||||||
|
|
||||||
_ = cnr.Decode(key[:32])
|
|
||||||
_ = obj.Decode(key[32:])
|
|
||||||
|
|
||||||
r.object.SetContainer(cnr)
|
|
||||||
r.object.SetObject(obj)
|
|
||||||
|
|
||||||
_ = cnr.Decode(value[:32])
|
|
||||||
_ = obj.Decode(value[32:])
|
|
||||||
|
|
||||||
r.tombstone.SetContainer(cnr)
|
|
||||||
r.tombstone.SetObject(obj)
|
|
||||||
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func GarbageRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if len(key) != 64 {
|
|
||||||
return nil, nil, ErrInvalidKeyLength
|
|
||||||
}
|
|
||||||
var (
|
|
||||||
cnr cid.ID
|
|
||||||
obj oid.ID
|
|
||||||
r GarbageRecord
|
|
||||||
)
|
|
||||||
|
|
||||||
_ = cnr.Decode(key[:32])
|
|
||||||
_ = obj.Decode(key[32:])
|
|
||||||
|
|
||||||
r.addr.SetContainer(cnr)
|
|
||||||
r.addr.SetObject(obj)
|
|
||||||
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ContainerVolumeRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if len(key) != 32 {
|
|
||||||
return nil, nil, ErrInvalidKeyLength
|
|
||||||
}
|
|
||||||
if len(value) != 8 {
|
|
||||||
return nil, nil, ErrInvalidValueLength
|
|
||||||
}
|
|
||||||
var r ContainerVolumeRecord
|
|
||||||
|
|
||||||
_ = r.id.Decode(key)
|
|
||||||
r.volume = binary.LittleEndian.Uint64(value)
|
|
||||||
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func LockedRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
var (
|
|
||||||
r LockedRecord
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
if err := r.id.Decode(key); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
if r.ids, err = DecodeOIDs(value); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ShardInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if len(key) == 0 {
|
|
||||||
return nil, nil, ErrInvalidKeyLength
|
|
||||||
}
|
|
||||||
|
|
||||||
var r ShardInfoRecord
|
|
||||||
if string(key) == "id" {
|
|
||||||
r.label = string(key)
|
|
||||||
r.value = shard.ID(value).String()
|
|
||||||
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(value) != 8 {
|
|
||||||
return nil, nil, ErrInvalidValueLength
|
|
||||||
}
|
|
||||||
r.label = string(key)
|
|
||||||
r.value = strconv.FormatUint(binary.LittleEndian.Uint64(value), 10)
|
|
||||||
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ObjectRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if len(key) != 32 {
|
|
||||||
return nil, nil, ErrInvalidKeyLength
|
|
||||||
}
|
|
||||||
var r ObjectRecord
|
|
||||||
|
|
||||||
_ = r.id.Decode(key)
|
|
||||||
if err := r.object.Unmarshal(value); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func SmallRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
var r SmallRecord
|
|
||||||
if err := r.id.Decode(key); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
if len(value) != 0 {
|
|
||||||
x := string(value)
|
|
||||||
r.storageID = &x
|
|
||||||
}
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func RootRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
var r RootRecord
|
|
||||||
if err := r.id.Decode(key); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
if len(value) == 0 {
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
r.info = &objectSDK.SplitInfo{}
|
|
||||||
if err := r.info.Unmarshal(value); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func OwnerRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
var r OwnerRecord
|
|
||||||
if err := r.id.Decode(key); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func UserAttributeRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
var r UserAttributeRecord
|
|
||||||
if err := r.id.Decode(key); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func PayloadHashRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if len(key) != 32 {
|
|
||||||
return nil, nil, ErrInvalidKeyLength
|
|
||||||
}
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
r PayloadHashRecord
|
|
||||||
)
|
|
||||||
|
|
||||||
r.checksum.SetSHA256([32]byte(key))
|
|
||||||
if r.ids, err = DecodeOIDs(value); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ParentRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
var (
|
|
||||||
r ParentRecord
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
if err = r.parent.Decode(key); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
if r.ids, err = DecodeOIDs(value); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func SplitRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
r SplitRecord
|
|
||||||
)
|
|
||||||
if err = r.id.UnmarshalBinary(key); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
if r.ids, err = DecodeOIDs(value); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ContainerCountersRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if len(value) != 24 {
|
|
||||||
return nil, nil, ErrInvalidValueLength
|
|
||||||
}
|
|
||||||
|
|
||||||
var r ContainerCountersRecord
|
|
||||||
if err := r.id.Decode(key); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
r.logical = binary.LittleEndian.Uint64(value[:8])
|
|
||||||
r.physical = binary.LittleEndian.Uint64(value[8:16])
|
|
||||||
r.user = binary.LittleEndian.Uint64(value[16:24])
|
|
||||||
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
var (
|
|
||||||
r ECInfoRecord
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
if err := r.id.Decode(key); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
if r.ids, err = DecodeOIDs(value); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
|
@ -1,135 +0,0 @@
|
||||||
package records
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
)
|
|
||||||
|
|
||||||
// String renders the buried object's address and its tombstone's address
// on one line, separated by a vertical border rune.
func (r *GraveyardRecord) String() string {
	return fmt.Sprintf(
		"Object CID %s OID %s %c Tombstone CID %s OID %s",
		common.FormatSimple(fmt.Sprintf("%-44s", r.object.Container()), tcell.ColorAqua),
		common.FormatSimple(fmt.Sprintf("%-44s", r.object.Object()), tcell.ColorAqua),
		tview.Borders.Vertical,
		common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Container()), tcell.ColorAqua),
		common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Object()), tcell.ColorAqua),
	)
}

// String renders the garbage entry's address (CID and OID).
func (r *GarbageRecord) String() string {
	return fmt.Sprintf(
		"CID %-44s OID %-44s",
		common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua),
		common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua),
	)
}

// String renders the container ID and its stored volume.
func (r *ContainerVolumeRecord) String() string {
	return fmt.Sprintf(
		"CID %-44s %c %d",
		common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
		tview.Borders.Vertical,
		r.volume,
	)
}

// String renders the locker's OID and the count of locked objects.
func (r *LockedRecord) String() string {
	return fmt.Sprintf(
		"Locker OID %s %c Locked [%d]OID {...}",
		common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
		tview.Borders.Vertical,
		len(r.ids),
	)
}

// String renders the shard-info label/value pair.
func (r *ShardInfoRecord) String() string {
	return fmt.Sprintf("%-13s %c %s", r.label, tview.Borders.Vertical, r.value)
}

// String renders the object ID; the object body itself is elided.
func (r *ObjectRecord) String() string {
	return fmt.Sprintf(
		"OID %s %c Object {...}",
		common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
		tview.Borders.Vertical,
	)
}

// String renders the object ID, followed by the storage ID when present.
func (r *SmallRecord) String() string {
	s := fmt.Sprintf(
		"OID %s %c",
		common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
		tview.Borders.Vertical,
	)
	if r.storageID != nil {
		s = fmt.Sprintf("%s %s", s, *r.storageID)
	}
	return s
}

// String renders the root object ID, noting attached split info when present.
func (r *RootRecord) String() string {
	s := fmt.Sprintf(
		"Root OID %s %c",
		common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
		tview.Borders.Vertical,
	)
	if r.info != nil {
		s += " Split info {...}"
	}
	return s
}

// String renders the owned object's ID.
func (r *OwnerRecord) String() string {
	return "OID " + common.FormatSimple(r.id.String(), tcell.ColorAqua)
}

// String renders the attributed object's ID.
func (r *UserAttributeRecord) String() string {
	return "OID " + common.FormatSimple(r.id.String(), tcell.ColorAqua)
}

// String renders the payload checksum and the count of objects sharing it.
func (r *PayloadHashRecord) String() string {
	return fmt.Sprintf(
		"Checksum %s %c [%d]OID {...}",
		common.FormatSimple(r.checksum.String(), tcell.ColorAqua),
		tview.Borders.Vertical,
		len(r.ids),
	)
}

// String renders the parent OID and the count of child objects.
func (r *ParentRecord) String() string {
	return fmt.Sprintf(
		"Parent OID %s %c [%d]OID {...}",
		common.FormatSimple(fmt.Sprintf("%-44s", r.parent), tcell.ColorAqua),
		tview.Borders.Vertical,
		len(r.ids),
	)
}

// String renders the split ID and the count of member objects.
func (r *SplitRecord) String() string {
	return fmt.Sprintf(
		"Split ID %s %c [%d]OID {...}",
		common.FormatSimple(r.id.String(), tcell.ColorAqua),
		tview.Borders.Vertical,
		len(r.ids),
	)
}

// String renders the container ID and its three object counters.
func (r *ContainerCountersRecord) String() string {
	return fmt.Sprintf(
		"CID %s %c logical %d, physical %d, user %d",
		common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
		tview.Borders.Vertical,
		r.logical, r.physical, r.user,
	)
}

// String renders the EC-parent OID and the count of EC chunk objects.
func (r *ECInfoRecord) String() string {
	return fmt.Sprintf(
		"OID %s %c [%d]OID {...}",
		common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
		tview.Borders.Vertical,
		len(r.ids),
	)
}
|
|
|
@ -1,82 +0,0 @@
|
||||||
package records
|
|
||||||
|
|
||||||
import (
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
	// GraveyardRecord pairs a buried object's address with the address
	// of the tombstone covering it.
	GraveyardRecord struct {
		object, tombstone oid.Address
	}

	// GarbageRecord holds one object address from the garbage index.
	GarbageRecord struct {
		addr oid.Address
	}

	// ContainerVolumeRecord holds the payload volume counter of one
	// container.
	ContainerVolumeRecord struct {
		id     cid.ID
		volume uint64
	}

	// LockedRecord maps a locker object ID to the object IDs it locks.
	LockedRecord struct {
		id  oid.ID
		ids []oid.ID
	}

	// ShardInfoRecord is a generic label/value pair of shard metadata.
	ShardInfoRecord struct {
		label string
		value string
	}

	// ObjectRecord pairs an object ID with its decoded object.
	ObjectRecord struct {
		id     oid.ID
		object objectSDK.Object
	}

	// SmallRecord pairs an object ID with its storage ID, when known.
	SmallRecord struct {
		id        oid.ID
		storageID *string // optional
	}

	// RootRecord describes a root object; info is set only for split
	// objects.
	RootRecord struct {
		id   oid.ID
		info *objectSDK.SplitInfo // optional
	}

	// OwnerRecord is an object ID indexed by owner.
	OwnerRecord struct {
		id oid.ID
	}

	// UserAttributeRecord is an object ID indexed by a user attribute.
	UserAttributeRecord struct {
		id oid.ID
	}

	// PayloadHashRecord maps a payload checksum to the object IDs that
	// share it.
	PayloadHashRecord struct {
		checksum checksum.Checksum
		ids      []oid.ID
	}

	// ParentRecord maps a parent object ID to associated object IDs.
	ParentRecord struct {
		parent oid.ID
		ids    []oid.ID
	}

	// SplitRecord maps a split UUID to its member object IDs.
	SplitRecord struct {
		id  uuid.UUID
		ids []oid.ID
	}

	// ContainerCountersRecord holds per-container object counters.
	ContainerCountersRecord struct {
		id                      cid.ID
		logical, physical, user uint64
	}

	// ECInfoRecord maps an object ID to a list of associated object IDs
	// (presumably its erasure-coded chunks — inferred from the name;
	// confirm against the writer of this bucket).
	ECInfoRecord struct {
		id  oid.ID
		ids []oid.ID
	}
)
|
|
|
@ -1,20 +0,0 @@
|
||||||
package records
|
|
||||||
|
|
||||||
import (
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
func DecodeOIDs(data []byte) ([]oid.ID, error) {
|
|
||||||
r := io.NewBinReaderFromBuf(data)
|
|
||||||
|
|
||||||
size := r.ReadVarUint()
|
|
||||||
oids := make([]oid.ID, size)
|
|
||||||
|
|
||||||
for i := uint64(0); i < size; i++ {
|
|
||||||
if err := oids[i].Decode(r.ReadVarBytes()); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return oids, nil
|
|
||||||
}
|
|
|
@ -1,63 +0,0 @@
|
||||||
package writecache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/mr-tron/base58"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WritecacheParser parses the write-cache database schema, falling back
// to raw key/value rendering for entries the default parser rejects.
var WritecacheParser = common.WithFallback(
	DefaultBucketParser,
	common.RawParser.ToFallbackParser(),
)
|
|
||||||
|
|
||||||
func DefaultBucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
if value != nil {
|
|
||||||
return nil, nil, errors.New("not a bucket")
|
|
||||||
}
|
|
||||||
if !bytes.Equal(key, []byte{0}) {
|
|
||||||
return nil, nil, errors.New("invalid key")
|
|
||||||
}
|
|
||||||
return &DefaultBucket{}, DefaultRecordParser, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
|
||||||
parts := strings.Split(string(key), "/")
|
|
||||||
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return nil, nil, errors.New("invalid key, expected address string <CID>/<OID>")
|
|
||||||
}
|
|
||||||
|
|
||||||
cnrRaw, err := base58.Decode(parts[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, errors.New("can't decode CID string")
|
|
||||||
}
|
|
||||||
objRaw, err := base58.Decode(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, errors.New("can't decode OID string")
|
|
||||||
}
|
|
||||||
|
|
||||||
cnr := cid.ID{}
|
|
||||||
if err := cnr.Decode(cnrRaw); err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("can't decode CID: %w", err)
|
|
||||||
}
|
|
||||||
obj := oid.ID{}
|
|
||||||
if err := obj.Decode(objRaw); err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("can't decode OID: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var r DefaultRecord
|
|
||||||
|
|
||||||
r.addr.SetContainer(cnr)
|
|
||||||
r.addr.SetObject(obj)
|
|
||||||
|
|
||||||
r.data = value[:]
|
|
||||||
|
|
||||||
return &r, nil, nil
|
|
||||||
}
|
|
|
@ -1,66 +0,0 @@
|
||||||
package writecache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
	// DefaultBucket is the single top-level write-cache bucket (the one
	// with the one-byte name {0}).
	DefaultBucket struct{}

	// DefaultRecord is one cached entry: the object address parsed from
	// the key and the raw value bytes.
	DefaultRecord struct {
		addr oid.Address
		data []byte
	}
)
|
|
||||||
|
|
||||||
func (b *DefaultBucket) String() string {
|
|
||||||
return common.FormatSimple("0 Default", tcell.ColorLime)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *DefaultRecord) String() string {
|
|
||||||
return fmt.Sprintf(
|
|
||||||
"CID %s OID %s %c Data {...}",
|
|
||||||
common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua),
|
|
||||||
common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua),
|
|
||||||
tview.Borders.Vertical,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DetailedString dumps the bucket with all fields expanded.
func (b *DefaultBucket) DetailedString() string {
	return spew.Sdump(*b)
}
|
|
||||||
|
|
||||||
// DetailedString dumps the record with all fields expanded.
func (r *DefaultRecord) DetailedString() string {
	return spew.Sdump(*r)
}
|
|
||||||
|
|
||||||
func (b *DefaultBucket) Filter(typ string, _ any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
return common.Maybe
|
|
||||||
case "oid":
|
|
||||||
return common.Maybe
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *DefaultRecord) Filter(typ string, val any) common.FilterResult {
|
|
||||||
switch typ {
|
|
||||||
case "cid":
|
|
||||||
id := val.(cid.ID)
|
|
||||||
return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
|
|
||||||
case "oid":
|
|
||||||
id := val.(oid.ID)
|
|
||||||
return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
|
|
||||||
default:
|
|
||||||
return common.No
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,257 +0,0 @@
|
||||||
package tui
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BucketsView is the page showing the database's bucket hierarchy as a
// lazily loaded tree.
type BucketsView struct {
	*tview.Box

	// mu guards tree mutations performed from the Update worker.
	mu sync.Mutex

	view *tview.TreeView
	// nodeToUpdate, when non-nil, is the node whose children the next
	// Update call must (re)load; Update resets it to nil when done.
	nodeToUpdate *tview.TreeNode

	ui     *UI
	filter *Filter
}

// bucketNode is the payload attached to every tree node: the bucket it
// represents plus the filter narrowed by that bucket's entry.
type bucketNode struct {
	bucket *Bucket
	filter *Filter
}
|
|
||||||
|
|
||||||
func NewBucketsView(ui *UI, filter *Filter) *BucketsView {
|
|
||||||
return &BucketsView{
|
|
||||||
Box: tview.NewBox(),
|
|
||||||
view: tview.NewTreeView(),
|
|
||||||
ui: ui,
|
|
||||||
filter: filter,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *BucketsView) Mount(_ context.Context) error {
|
|
||||||
root := tview.NewTreeNode(".")
|
|
||||||
root.SetExpanded(false)
|
|
||||||
root.SetSelectable(false)
|
|
||||||
root.SetReference(&bucketNode{
|
|
||||||
bucket: &Bucket{NextParser: v.ui.rootParser},
|
|
||||||
filter: v.filter,
|
|
||||||
})
|
|
||||||
|
|
||||||
v.nodeToUpdate = root
|
|
||||||
|
|
||||||
v.view.SetRoot(root)
|
|
||||||
v.view.SetCurrentNode(root)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *BucketsView) Update(ctx context.Context) error {
|
|
||||||
if v.nodeToUpdate == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
defer func() { v.nodeToUpdate = nil }()
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
ready := make(chan struct{})
|
|
||||||
errCh := make(chan error)
|
|
||||||
|
|
||||||
tmp := tview.NewTreeNode(v.nodeToUpdate.GetText())
|
|
||||||
tmp.SetReference(v.nodeToUpdate.GetReference())
|
|
||||||
|
|
||||||
node := v.nodeToUpdate.GetReference().(*bucketNode)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
defer close(ready)
|
|
||||||
|
|
||||||
hasBuckets, err := HasBuckets(ctx, v.ui.db, node.bucket.Path)
|
|
||||||
if err != nil {
|
|
||||||
errCh <- err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Show the selected bucket's records instead.
|
|
||||||
if !hasBuckets && node.bucket.NextParser != nil {
|
|
||||||
v.ui.moveNextPage(NewRecordsView(v.ui, node.bucket, node.filter))
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.nodeToUpdate.IsExpanded() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = v.loadNodeChildren(ctx, tmp, node.filter)
|
|
||||||
if err != nil {
|
|
||||||
errCh <- err
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
case <-ready:
|
|
||||||
v.mu.Lock()
|
|
||||||
v.nodeToUpdate.SetChildren(tmp.GetChildren())
|
|
||||||
v.nodeToUpdate.SetExpanded(!v.nodeToUpdate.IsExpanded())
|
|
||||||
v.mu.Unlock()
|
|
||||||
case err := <-errCh:
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmount is a no-op: the view holds no resources to release.
func (v *BucketsView) Unmount() {
}
|
|
||||||
|
|
||||||
func (v *BucketsView) Draw(screen tcell.Screen) {
|
|
||||||
x, y, width, height := v.GetInnerRect()
|
|
||||||
v.view.SetRect(x, y, width, height)
|
|
||||||
|
|
||||||
v.view.Draw(screen)
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadNodeChildren streams the child buckets of node's bucket from the
// database, parses each name with the parent's schema parser, drops the
// ones rejected by filter, and appends the rest as collapsed tree nodes.
func (v *BucketsView) loadNodeChildren(
	ctx context.Context, node *tview.TreeNode, filter *Filter,
) error {
	parentBucket := node.GetReference().(*bucketNode).bucket

	path := parentBucket.Path
	parser := parentBucket.NextParser

	buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
	if err != nil {
		return err
	}

	for item := range buffer {
		if item.err != nil {
			return item.err
		}
		bucket := item.val

		// Parse the raw bucket name into a display entry plus the
		// parser for the next level down.
		bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
		if err != nil {
			return err
		}

		satisfies, err := v.bucketSatisfiesFilter(ctx, bucket, filter)
		if err != nil {
			return err
		}
		if !satisfies {
			continue
		}

		child := tview.NewTreeNode(bucket.Entry.String()).
			SetSelectable(true).
			SetExpanded(false).
			SetReference(&bucketNode{
				bucket: bucket,
				// Narrow the filter by what this bucket's entry
				// already satisfies.
				filter: filter.Apply(bucket.Entry),
			})

		node.AddChild(child)
	}

	return nil
}
|
|
||||||
|
|
||||||
// bucketSatisfiesFilter reports whether bucket, or anything nested in it
// (child buckets recursively, then records), matches filter. Traversal
// stops at the first definite answer.
func (v *BucketsView) bucketSatisfiesFilter(
	ctx context.Context, bucket *Bucket, filter *Filter,
) (bool, error) {
	// Does the current bucket itself satisfy the filter?
	filter = filter.Apply(bucket.Entry)

	if filter.Result() == common.Yes {
		return true, nil
	}

	if filter.Result() == common.No {
		return false, nil
	}

	// Result is Maybe: descend. Cancel the child streams on return.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Check the current bucket's nested buckets if exist
	bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
	if err != nil {
		return false, err
	}

	for item := range bucketsBuffer {
		if item.err != nil {
			return false, item.err
		}
		b := item.val

		b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
		if err != nil {
			return false, err
		}

		satisfies, err := v.bucketSatisfiesFilter(ctx, b, filter)
		if err != nil {
			return false, err
		}
		if satisfies {
			return true, nil
		}
	}

	// Check the current bucket's nested records if exist
	recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
	if err != nil {
		return false, err
	}

	for item := range recordsBuffer {
		if item.err != nil {
			return false, item.err
		}
		r := item.val

		r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
		if err != nil {
			return false, err
		}

		if filter.Apply(r.Entry).Result() == common.Yes {
			return true, nil
		}
	}

	return false, nil
}
|
|
||||||
|
|
||||||
// InputHandler dispatches keys on the bucket tree: Enter marks the
// selected node for lazy expansion/collapse (handled in Update), Ctrl-R
// jumps straight to the records view, Ctrl-D opens the detailed view;
// everything else is forwarded to the tree widget.
func (v *BucketsView) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
	return v.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
		currentNode := v.view.GetCurrentNode()
		if currentNode == nil {
			return
		}

		switch event.Key() {
		case tcell.KeyEnter:
			// Expand or collapse the selected bucket's nested buckets,
			// otherwise, navigate to that bucket's records. The actual
			// work happens asynchronously in Update.
			v.nodeToUpdate = currentNode
		case tcell.KeyCtrlR:
			// Navigate to the selected bucket's records.
			bucketNode := currentNode.GetReference().(*bucketNode)
			v.ui.moveNextPage(NewRecordsView(v.ui, bucketNode.bucket, bucketNode.filter))
		case tcell.KeyCtrlD:
			// Navigate to the selected bucket's detailed view.
			bucketNode := currentNode.GetReference().(*bucketNode)
			v.ui.moveNextPage(NewDetailedView(bucketNode.bucket.Entry.DetailedString()))
		default:
			v.view.InputHandler()(event, func(tview.Primitive) {})
		}
	})
}
|
|
|
@ -1,160 +0,0 @@
|
||||||
package tui
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"go.etcd.io/bbolt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Item carries one streamed value, or the error that ended the stream.
type Item[T any] struct {
	val T
	err error
}
|
|
||||||
|
|
||||||
func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
|
|
||||||
if len(path) == 0 {
|
|
||||||
return nil, errors.New("can't find bucket without path")
|
|
||||||
}
|
|
||||||
|
|
||||||
name := path[0]
|
|
||||||
bucket := tx.Bucket(name)
|
|
||||||
if bucket == nil {
|
|
||||||
return nil, fmt.Errorf("no bucket with name %s", name)
|
|
||||||
}
|
|
||||||
for _, name := range path[1:] {
|
|
||||||
bucket = bucket.Bucket(name)
|
|
||||||
if bucket == nil {
|
|
||||||
return nil, fmt.Errorf("no bucket with name %s", name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return bucket, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// load starts a goroutine that iterates the bucket at path (or the tx
// root when path is empty), keeps the key/value pairs accepted by filter
// (nil filter accepts everything), and streams transform's result for
// each through the returned buffered channel. The channel is closed when
// iteration finishes or ctx is cancelled; failures are delivered as
// Item{err: ...} on the same channel.
func load[T any](
	ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
	filter func(key, value []byte) bool, transform func(key, value []byte) T,
) (<-chan Item[T], error) {
	buffer := make(chan Item[T], bufferSize)

	go func() {
		defer close(buffer)

		err := db.View(func(tx *bbolt.Tx) error {
			var cursor *bbolt.Cursor
			if len(path) == 0 {
				// Empty path: iterate the top-level buckets.
				cursor = tx.Cursor()
			} else {
				bucket, err := resolvePath(tx, path)
				if err != nil {
					buffer <- Item[T]{err: fmt.Errorf("can't find bucket: %w", err)}
					return nil
				}
				cursor = bucket.Cursor()
			}

			key, value := cursor.First()
			for {
				if key == nil {
					// Iteration exhausted.
					return nil
				}
				if filter != nil && !filter(key, value) {
					key, value = cursor.Next()
					continue
				}

				select {
				case <-ctx.Done():
					// Consumer gave up; stop producing.
					return nil
				case buffer <- Item[T]{val: transform(key, value)}:
					key, value = cursor.Next()
				}
			}
		})
		if err != nil {
			buffer <- Item[T]{err: err}
		}
	}()

	return buffer, nil
}
|
|
||||||
|
|
||||||
func LoadBuckets(
|
|
||||||
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
|
|
||||||
) (<-chan Item[*Bucket], error) {
|
|
||||||
buffer, err := load(
|
|
||||||
ctx, db, path, bufferSize,
|
|
||||||
func(_, value []byte) bool {
|
|
||||||
return value == nil
|
|
||||||
},
|
|
||||||
func(key, _ []byte) *Bucket {
|
|
||||||
base := make([][]byte, 0, len(path))
|
|
||||||
base = append(base, path...)
|
|
||||||
|
|
||||||
return &Bucket{
|
|
||||||
Name: key,
|
|
||||||
Path: append(base, key),
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return buffer, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func LoadRecords(
|
|
||||||
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
|
|
||||||
) (<-chan Item[*Record], error) {
|
|
||||||
buffer, err := load(
|
|
||||||
ctx, db, path, bufferSize,
|
|
||||||
func(_, value []byte) bool {
|
|
||||||
return value != nil
|
|
||||||
},
|
|
||||||
func(key, value []byte) *Record {
|
|
||||||
base := make([][]byte, 0, len(path))
|
|
||||||
base = append(base, path...)
|
|
||||||
|
|
||||||
return &Record{
|
|
||||||
Key: key,
|
|
||||||
Value: value,
|
|
||||||
Path: append(base, key),
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return buffer, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasBuckets checks if a bucket has nested buckets. It relies on assumption
|
|
||||||
// that a bucket can have either nested buckets or records but not both.
|
|
||||||
func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error) {
|
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
buffer, err := load(
|
|
||||||
ctx, db, path, 1,
|
|
||||||
nil,
|
|
||||||
func(_, value []byte) []byte { return value },
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
x, ok := <-buffer
|
|
||||||
if !ok {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
if x.err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
if x.val != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
return true, nil
|
|
||||||
}
|
|
|
@ -1,24 +0,0 @@
|
||||||
package tui
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DetailedView is a static, read-only page showing the full dump of a
// single schema entry.
type DetailedView struct {
	*tview.TextView
}
|
|
||||||
|
|
||||||
func NewDetailedView(detailed string) *DetailedView {
|
|
||||||
v := &DetailedView{
|
|
||||||
TextView: tview.NewTextView(),
|
|
||||||
}
|
|
||||||
v.SetDynamicColors(true)
|
|
||||||
v.SetText(detailed)
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mount is a no-op: the text is set once at construction time.
func (v *DetailedView) Mount(_ context.Context) error { return nil }

// Update is a no-op: the view's content never changes.
func (v *DetailedView) Update(_ context.Context) error { return nil }

// Unmount is a no-op: no resources to release.
func (v *DetailedView) Unmount() {}
|
|
|
@ -1,44 +0,0 @@
|
||||||
package tui
|
|
||||||
|
|
||||||
import (
|
|
||||||
"maps"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Filter holds the search criteria (tag -> value) together with the best
// match result observed so far for each tag.
type Filter struct {
	values  map[string]any
	results map[string]common.FilterResult
}
|
|
||||||
|
|
||||||
func NewFilter(values map[string]any) *Filter {
|
|
||||||
f := &Filter{
|
|
||||||
values: maps.Clone(values),
|
|
||||||
results: make(map[string]common.FilterResult),
|
|
||||||
}
|
|
||||||
for tag := range values {
|
|
||||||
f.results[tag] = common.No
|
|
||||||
}
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Filter) Apply(e common.SchemaEntry) *Filter {
|
|
||||||
filter := &Filter{
|
|
||||||
values: f.values,
|
|
||||||
results: maps.Clone(f.results),
|
|
||||||
}
|
|
||||||
|
|
||||||
for tag, value := range filter.values {
|
|
||||||
filter.results[tag] = max(filter.results[tag], e.Filter(tag, value))
|
|
||||||
}
|
|
||||||
|
|
||||||
return filter
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Filter) Result() common.FilterResult {
|
|
||||||
current := common.Yes
|
|
||||||
for _, r := range f.results {
|
|
||||||
current = min(r, current)
|
|
||||||
}
|
|
||||||
return current
|
|
||||||
}
|
|
|
@ -1,38 +0,0 @@
|
||||||
[green::b]HOTKEYS[-::-]
|
|
||||||
|
|
||||||
[green::b]Navigation[-::-]
|
|
||||||
|
|
||||||
[yellow::b]Down Arrow[-::-] / [yellow::b]j[-::-]
|
|
||||||
Scroll down.
|
|
||||||
|
|
||||||
[yellow::b]Up Arrow[-::-] / [yellow::b]k[-::-]
|
|
||||||
Scroll up.
|
|
||||||
|
|
||||||
[yellow::b]Page Down[-::-] / [yellow::b]Ctrl-f[-::-]
|
|
||||||
Scroll down by a full page.
|
|
||||||
|
|
||||||
[yellow::b]Page Up[-::-] / [yellow::b]Ctrl-b[-::-]
|
|
||||||
Scroll up by a full page.
|
|
||||||
|
|
||||||
[green::b]Actions[-::-]
|
|
||||||
|
|
||||||
[yellow::b]Enter[-::-]
|
|
||||||
Perform actions based on the current context:
|
|
||||||
- In Buckets View:
|
|
||||||
- Expand/collapse the selected bucket to show/hide its nested buckets.
|
|
||||||
- If no nested buckets exist, navigate to the selected bucket's records.
|
|
||||||
- In Records View: Open the detailed view of the selected record.
|
|
||||||
|
|
||||||
[yellow::b]Escape[-::-]
|
|
||||||
Return to the previous page, opposite of [yellow::b]Enter[-::-].
|
|
||||||
|
|
||||||
Refer to the [green::b]SEARCHING[-::-] section for more specific actions.
|
|
||||||
|
|
||||||
|
|
||||||
[green::b]Alternative Action Hotkeys[-::-]
|
|
||||||
|
|
||||||
[yellow::b]Ctrl-r[-::-]
|
|
||||||
Directly navigate to the selected bucket's records.
|
|
||||||
|
|
||||||
[yellow::b]Ctrl-d[-::-]
|
|
||||||
Access the detailed view of the selected bucket.
|
|
|
@ -1,26 +0,0 @@
|
||||||
[green::b]SEARCHING[-::-]
|
|
||||||
|
|
||||||
[green::b]Hotkeys[-::-]
|
|
||||||
|
|
||||||
[yellow::b]/[-::-]
|
|
||||||
Initiate the search prompt.
|
|
||||||
- The prompt follows this syntax: [yellow::b]tag:value [+ tag:value]...[-::-]
|
|
||||||
- Multiple filters can be combined with [yellow::b]+[-::-]; the result is an intersection of those filters' result sets.
|
|
||||||
- Any leading and trailing whitespace will be ignored.
|
|
||||||
- An empty prompt will return all results with no filters applied.
|
|
||||||
- Refer to the [green::b]Available Search Filters[-::-] section below for a list of valid filter tags.
|
|
||||||
|
|
||||||
[yellow::b]Enter[-::-]
|
|
||||||
Execute the search based on the entered prompt.
|
|
||||||
- If the prompt is invalid, an error message will be displayed.
|
|
||||||
|
|
||||||
[yellow::b]Escape[-::-]
|
|
||||||
Exit the search prompt without performing a search.
|
|
||||||
|
|
||||||
[yellow::b]Down Arrow[-::-], [yellow::b]Up Arrow[-::-]
|
|
||||||
Scroll through the search history.
|
|
||||||
|
|
||||||
|
|
||||||
[green::b]Available Search Filters[-::-]
|
|
||||||
|
|
||||||
%s
|
|
|
@ -1,101 +0,0 @@
|
||||||
package tui
|
|
||||||
|
|
||||||
import (
|
|
||||||
_ "embed"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
//go:embed help-pages/hotkeys.txt
|
|
||||||
hotkeysHelpText string
|
|
||||||
|
|
||||||
//go:embed help-pages/searching.txt
|
|
||||||
searchingHelpText string
|
|
||||||
)
|
|
||||||
|
|
||||||
// HelpPage is a multi-page help widget; Enter cycles between pages.
type HelpPage struct {
	*tview.Box
	pages       []*tview.TextView
	currentPage int

	// filters and filterHints feed the "Available Search Filters" table
	// rendered into the searching page.
	filters     []string
	filterHints map[string]string
}
|
|
||||||
|
|
||||||
func NewHelpPage(filters []string, hints map[string]string) *HelpPage {
|
|
||||||
hp := &HelpPage{
|
|
||||||
Box: tview.NewBox(),
|
|
||||||
filters: filters,
|
|
||||||
filterHints: hints,
|
|
||||||
}
|
|
||||||
|
|
||||||
page := tview.NewTextView().
|
|
||||||
SetDynamicColors(true).
|
|
||||||
SetText(hotkeysHelpText)
|
|
||||||
hp.addPage(page)
|
|
||||||
|
|
||||||
page = tview.NewTextView().
|
|
||||||
SetDynamicColors(true).
|
|
||||||
SetText(fmt.Sprintf(searchingHelpText, hp.getFiltersText()))
|
|
||||||
hp.addPage(page)
|
|
||||||
|
|
||||||
return hp
|
|
||||||
}
|
|
||||||
|
|
||||||
// addPage appends page to the cycle order used by InputHandler.
func (hp *HelpPage) addPage(page *tview.TextView) {
	hp.pages = append(hp.pages, page)
}
|
|
||||||
|
|
||||||
func (hp *HelpPage) getFiltersText() string {
|
|
||||||
if len(hp.filters) == 0 {
|
|
||||||
return "\t\tNo filters defined.\n"
|
|
||||||
}
|
|
||||||
|
|
||||||
filtersText := strings.Builder{}
|
|
||||||
gapSize := 4
|
|
||||||
|
|
||||||
tagMaxWidth := 3
|
|
||||||
for _, filter := range hp.filters {
|
|
||||||
tagMaxWidth = max(tagMaxWidth, len(filter))
|
|
||||||
}
|
|
||||||
filtersText.WriteString("\t\t[yellow::b]Tag")
|
|
||||||
filtersText.WriteString(strings.Repeat(" ", gapSize))
|
|
||||||
filtersText.WriteString("\tValue[-::-]\n\n")
|
|
||||||
|
|
||||||
for _, filter := range hp.filters {
|
|
||||||
filtersText.WriteString("\t\t")
|
|
||||||
filtersText.WriteString(filter)
|
|
||||||
filtersText.WriteString(strings.Repeat(" ", tagMaxWidth-len(filter)+gapSize))
|
|
||||||
filtersText.WriteString(hp.filterHints[filter])
|
|
||||||
filtersText.WriteRune('\n')
|
|
||||||
}
|
|
||||||
|
|
||||||
return filtersText.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hp *HelpPage) Draw(screen tcell.Screen) {
|
|
||||||
x, y, width, height := hp.GetInnerRect()
|
|
||||||
hp.pages[hp.currentPage].SetRect(x+1, y+1, width-2, height-2)
|
|
||||||
hp.pages[hp.currentPage].Draw(screen)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hp *HelpPage) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
|
|
||||||
return hp.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
|
|
||||||
if event.Key() == tcell.KeyEnter {
|
|
||||||
hp.currentPage++
|
|
||||||
hp.currentPage %= len(hp.pages)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
hp.pages[hp.currentPage].InputHandler()(event, func(tview.Primitive) {})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// MouseHandler forwards all mouse events to the currently shown page.
func (hp *HelpPage) MouseHandler() func(action tview.MouseAction, event *tcell.EventMouse, setFocus func(p tview.Primitive)) (consumed bool, capture tview.Primitive) {
	return hp.WrapMouseHandler(func(action tview.MouseAction, event *tcell.EventMouse, _ func(tview.Primitive)) (consumed bool, capture tview.Primitive) {
		return hp.pages[hp.currentPage].MouseHandler()(action, event, func(tview.Primitive) {})
	})
}
|
|
|
@ -1,77 +0,0 @@
|
||||||
package tui
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
)
|
|
||||||
|
|
||||||
// InputFieldWithHistory is an input field with a prompt history that can
// be scrolled with the Up/Down arrow keys.
type InputFieldWithHistory struct {
	*tview.InputField
	// history holds past prompts, oldest first, capped at historyLimit.
	history      []string
	historyLimit int
	// historyPointer indexes the entry currently shown while scrolling;
	// it equals len(history) when not scrolling.
	historyPointer int
	// currentContent saves the in-progress input while scrolling so Down
	// can restore it.
	currentContent string
}
|
|
||||||
|
|
||||||
func NewInputFieldWithHistory(historyLimit int) *InputFieldWithHistory {
|
|
||||||
return &InputFieldWithHistory{
|
|
||||||
InputField: tview.NewInputField(),
|
|
||||||
historyLimit: historyLimit,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *InputFieldWithHistory) AddToHistory(s string) {
|
|
||||||
// Stop scrolling history on history change, need to start scrolling again.
|
|
||||||
defer func() { f.historyPointer = len(f.history) }()
|
|
||||||
|
|
||||||
// Used history data for search prompt, so just make that data recent.
|
|
||||||
if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
|
|
||||||
f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
|
|
||||||
f.history = append(f.history, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(f.history) == f.historyLimit {
|
|
||||||
f.history = f.history[1:]
|
|
||||||
}
|
|
||||||
f.history = append(f.history, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InputHandler adds history scrolling on the Up/Down arrow keys to the
// embedded input field; all other keys are handled by the field itself.
func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
	return f.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
		switch event.Key() {
		case tcell.KeyDown:
			if len(f.history) == 0 {
				return
			}
			// Need to start iterating before.
			if f.historyPointer == len(f.history) {
				return
			}
			// Iterate to most recent prompts.
			f.historyPointer++
			// Stop iterating over history.
			if f.historyPointer == len(f.history) {
				// Restore whatever was typed before scrolling began.
				f.InputField.SetText(f.currentContent)
				return
			}
			f.InputField.SetText(f.history[f.historyPointer])
		case tcell.KeyUp:
			if len(f.history) == 0 {
				return
			}
			// Start iterating over history.
			if f.historyPointer == len(f.history) {
				// Save the in-progress input so Down can restore it.
				f.currentContent = f.InputField.GetText()
			}
			// End of history.
			if f.historyPointer == 0 {
				return
			}
			// Iterate to least recent prompts.
			f.historyPointer--
			f.InputField.SetText(f.history[f.historyPointer])
		default:
			f.InputField.InputHandler()(event, func(tview.Primitive) {})
		}
	})
}
|
|
|
@ -1,72 +0,0 @@
|
||||||
package tui
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LoadingBar is a one-line bottom bar showing elapsed loading time while a
// long-running operation is in progress.
type LoadingBar struct {
	*tview.Box
	view           *tview.TextView
	secondsElapsed atomic.Int64 // incremented by the ticker goroutine, read by Draw
	needDrawFunc   func()       // requests an application redraw
	reset          func()       // cancels the ticker goroutine started by Start
}
|
|
||||||
|
|
||||||
func NewLoadingBar(needDrawFunc func()) *LoadingBar {
|
|
||||||
b := &LoadingBar{
|
|
||||||
Box: tview.NewBox(),
|
|
||||||
view: tview.NewTextView(),
|
|
||||||
needDrawFunc: needDrawFunc,
|
|
||||||
}
|
|
||||||
b.view.SetBackgroundColor(tview.Styles.PrimaryTextColor)
|
|
||||||
b.view.SetTextColor(b.GetBackgroundColor())
|
|
||||||
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start launches a background ticker that bumps the elapsed-seconds counter
// once per second and requests a redraw. The goroutine exits when ctx is
// canceled or Stop is called.
func (b *LoadingBar) Start(ctx context.Context) {
	ctx, b.reset = context.WithCancel(ctx)

	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()

		b.secondsElapsed.Store(0)
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				b.secondsElapsed.Add(1)
				b.needDrawFunc()
			}
		}
	}()
}

// Stop cancels the ticker goroutine started by Start.
// NOTE(review): calling Stop before Start panics on a nil b.reset — callers
// are expected to pair each Stop with a preceding Start; confirm.
func (b *LoadingBar) Stop() {
	b.reset()
}
|
|
||||||
|
|
||||||
func (b *LoadingBar) Draw(screen tcell.Screen) {
|
|
||||||
seconds := b.secondsElapsed.Load()
|
|
||||||
|
|
||||||
var time string
|
|
||||||
switch {
|
|
||||||
case seconds < 60:
|
|
||||||
time = fmt.Sprintf("%ds", seconds)
|
|
||||||
default:
|
|
||||||
time = fmt.Sprintf("%dm%ds", seconds/60, seconds%60)
|
|
||||||
}
|
|
||||||
b.view.SetText(fmt.Sprintf(" Loading... %s (press Escape to cancel) ", time))
|
|
||||||
|
|
||||||
x, y, width, _ := b.GetInnerRect()
|
|
||||||
b.view.SetRect(x, y, width, 1)
|
|
||||||
b.view.Draw(screen)
|
|
||||||
}
|
|
|
@ -1,271 +0,0 @@
|
||||||
package tui
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
)
|
|
||||||
|
|
||||||
// updateType describes which navigation action triggered the next Update
// call, so index recalculation knows how to move the visible window.
type updateType int

const (
	other updateType = iota
	moveToPrevPage
	moveToNextPage
	moveUp
	moveDown
	moveHome
	moveEnd
)

// RecordsView pages through the records of a single bucket, lazily pulling
// parsed and filtered records from a background loader goroutine.
type RecordsView struct {
	*tview.Box

	mu sync.RWMutex // guards the index fields and records below

	onUnmount func() // cancels the loader; nil while unmounted

	bucket  *Bucket
	records []*Record // records loaded so far, in load order

	buffer chan *Record // filtered records produced by the loader goroutine

	// Visible window [first, last) and the absolute selected index.
	firstRecordIndex    int
	lastRecordIndex     int
	selectedRecordIndex int

	updateType updateType // pending navigation action, consumed by Update

	ui     *UI
	filter *Filter
}
|
|
||||||
|
|
||||||
func NewRecordsView(ui *UI, bucket *Bucket, filter *Filter) *RecordsView {
|
|
||||||
return &RecordsView{
|
|
||||||
Box: tview.NewBox(),
|
|
||||||
bucket: bucket,
|
|
||||||
ui: ui,
|
|
||||||
filter: filter,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *RecordsView) Mount(ctx context.Context) error {
|
|
||||||
if v.onUnmount != nil {
|
|
||||||
return errors.New("try to mount already mounted component")
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, v.onUnmount = context.WithCancel(ctx)
|
|
||||||
|
|
||||||
tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
v.buffer = make(chan *Record, v.ui.loadBufferSize)
|
|
||||||
go func() {
|
|
||||||
defer close(v.buffer)
|
|
||||||
|
|
||||||
for item := range tempBuffer {
|
|
||||||
if item.err != nil {
|
|
||||||
v.ui.stopOnError(err)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
record := item.val
|
|
||||||
|
|
||||||
record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
|
|
||||||
if err != nil {
|
|
||||||
v.ui.stopOnError(err)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.filter.Apply(record.Entry).Result() != common.Yes {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
v.buffer <- record
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *RecordsView) Unmount() {
|
|
||||||
if v.onUnmount == nil {
|
|
||||||
panic("try to unmount not mounted component")
|
|
||||||
}
|
|
||||||
v.onUnmount()
|
|
||||||
v.onUnmount = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update pulls records from the loader until the requested window is
// covered (or the loader is exhausted / ctx is canceled), then clamps the
// window indexes to the data actually available and publishes them.
func (v *RecordsView) Update(ctx context.Context) error {
	_, _, _, recordsPerPage := v.GetInnerRect()
	firstRecordIndex, lastRecordIndex, selectedRecordIndex := v.getNewIndexes()

loop:
	for len(v.records) < lastRecordIndex {
		select {
		case <-ctx.Done():
			return nil
		case record, ok := <-v.buffer:
			if !ok {
				// Loader finished: fewer records exist than requested.
				break loop
			}
			v.records = append(v.records, record)
		}
	}

	// Set the update type to its default value after some specific key event
	// has been handled.
	v.updateType = other

	// Clamp the window to [0, len(records)] and keep the selection inside it.
	firstRecordIndex = max(0, min(firstRecordIndex, len(v.records)-recordsPerPage))
	lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records))
	selectedRecordIndex = min(selectedRecordIndex, lastRecordIndex-1)

	v.mu.Lock()
	v.firstRecordIndex = firstRecordIndex
	v.lastRecordIndex = lastRecordIndex
	v.selectedRecordIndex = selectedRecordIndex
	v.mu.Unlock()

	return nil
}

// getNewIndexes derives the prospective (first, last, selected) window
// indexes for the pending navigation action. The values may point past the
// loaded records; Update clamps them afterwards.
func (v *RecordsView) getNewIndexes() (int, int, int) {
	v.mu.RLock()
	firstRecordIndex := v.firstRecordIndex
	lastRecordIndex := v.lastRecordIndex
	selectedRecordIndex := v.selectedRecordIndex
	v.mu.RUnlock()

	_, _, _, recordsPerPage := v.GetInnerRect()

	switch v.updateType {
	case moveUp:
		// Move the selection up; scroll the window only at its top edge.
		if selectedRecordIndex != firstRecordIndex {
			selectedRecordIndex--
			break
		}
		firstRecordIndex = max(0, firstRecordIndex-1)
		lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records))
		selectedRecordIndex = firstRecordIndex
	case moveToPrevPage:
		// First jump to the top of the page, then page backwards.
		if selectedRecordIndex != firstRecordIndex {
			selectedRecordIndex = firstRecordIndex
			break
		}
		firstRecordIndex = max(0, firstRecordIndex-recordsPerPage)
		lastRecordIndex = firstRecordIndex + recordsPerPage
		selectedRecordIndex = firstRecordIndex
	case moveDown:
		// Move the selection down; scroll the window only at its bottom edge.
		if selectedRecordIndex != lastRecordIndex-1 {
			selectedRecordIndex++
			break
		}
		firstRecordIndex++
		lastRecordIndex++
		selectedRecordIndex++
	case moveToNextPage:
		// First jump to the bottom of the page, then page forwards.
		if selectedRecordIndex != lastRecordIndex-1 {
			selectedRecordIndex = lastRecordIndex - 1
			break
		}
		firstRecordIndex += recordsPerPage
		lastRecordIndex = firstRecordIndex + recordsPerPage
		selectedRecordIndex = lastRecordIndex - 1
	case moveHome:
		firstRecordIndex = 0
		lastRecordIndex = firstRecordIndex + recordsPerPage
		selectedRecordIndex = 0
	case moveEnd:
		// Request an effectively unbounded window; Update clamps it to the
		// real record count.
		lastRecordIndex = math.MaxInt32
		firstRecordIndex = lastRecordIndex - recordsPerPage
		selectedRecordIndex = lastRecordIndex - 1
	default:
		lastRecordIndex = firstRecordIndex + recordsPerPage
	}

	return firstRecordIndex, lastRecordIndex, selectedRecordIndex
}

// GetInnerRect shrinks the Box's inner rect by a three-cell left padding.
func (v *RecordsView) GetInnerRect() (int, int, int, int) {
	x, y, width, height := v.Box.GetInnerRect()

	// Left padding.
	x = min(x+3, x+width-1)
	width = max(width-3, 0)

	return x, y, width, height
}

// Draw renders the visible window of records, highlighting the selection.
func (v *RecordsView) Draw(screen tcell.Screen) {
	v.mu.RLock()
	firstRecordIndex := v.firstRecordIndex
	lastRecordIndex := v.lastRecordIndex
	selectedRecordIndex := v.selectedRecordIndex
	records := v.records
	v.mu.RUnlock()

	v.DrawForSubclass(screen, v)

	x, y, width, height := v.GetInnerRect()
	if height == 0 {
		return
	}

	// No records in that bucket.
	if firstRecordIndex == lastRecordIndex {
		tview.Print(
			screen, "Empty Bucket", x, y, width, tview.AlignCenter, tview.Styles.PrimaryTextColor,
		)
		return
	}

	for index := firstRecordIndex; index < lastRecordIndex; index++ {
		result := records[index].Entry
		text := result.String()

		if index == selectedRecordIndex {
			// Inverted colors mark the selected record.
			text = fmt.Sprintf("[:white]%s[:-]", text)
			tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimitiveBackgroundColor)
		} else {
			tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimaryTextColor)
		}

		y++
	}
}

// InputHandler records the navigation action for the next Update call;
// Enter opens a detailed view of the selected record.
func (v *RecordsView) InputHandler() func(event *tcell.EventKey, _ func(p tview.Primitive)) {
	return v.WrapInputHandler(func(event *tcell.EventKey, _ func(p tview.Primitive)) {
		switch m, k := event.Modifiers(), event.Key(); {
		case m == 0 && k == tcell.KeyPgUp:
			v.updateType = moveToPrevPage
		case m == 0 && k == tcell.KeyPgDn:
			v.updateType = moveToNextPage
		case m == 0 && k == tcell.KeyUp:
			v.updateType = moveUp
		case m == 0 && k == tcell.KeyDown:
			v.updateType = moveDown
		case m == 0 && k == tcell.KeyHome:
			v.updateType = moveHome
		case m == 0 && k == tcell.KeyEnd:
			v.updateType = moveEnd
		case k == tcell.KeyEnter:
			v.mu.RLock()
			selectedRecordIndex := v.selectedRecordIndex
			records := v.records
			v.mu.RUnlock()
			if len(records) != 0 {
				current := records[selectedRecordIndex]
				v.ui.moveNextPage(NewDetailedView(current.Entry.DetailedString()))
			}
		}
	})
}
|
|
|
@ -1,18 +0,0 @@
|
||||||
package tui
|
|
||||||
|
|
||||||
import (
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Bucket is a schema-aware reference to a bbolt bucket.
type Bucket struct {
	Name       []byte
	Path       [][]byte // key path from the database root to this bucket
	Entry      common.SchemaEntry
	NextParser common.Parser // parses keys/values of this bucket's children
}

// Record is a single key/value pair read from a bucket, together with its
// parsed schema entry.
type Record struct {
	Key, Value []byte
	Path       [][]byte // key path from the database root to this record
	Entry      common.SchemaEntry
}
|
|
|
@ -1,561 +0,0 @@
|
||||||
package tui
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
|
||||||
"github.com/gdamore/tcell/v2"
|
|
||||||
"github.com/rivo/tview"
|
|
||||||
"go.etcd.io/bbolt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config tunes UI resource usage and responsiveness.
type Config struct {
	LoadBufferSize      int           // records fetched per background load batch
	SearchHistorySize   int           // max prompts kept in the search history
	LoadingIndicatorLag time.Duration // delay before the loading bar appears
}

// DefaultConfig is used when NewUI receives a nil *Config.
var DefaultConfig = Config{
	LoadBufferSize:      100,
	SearchHistorySize:   100,
	LoadingIndicatorLag: 500 * time.Millisecond,
}

// Primitive is a tview primitive with an explicit mount/update/unmount
// lifecycle, driven by the UI's draw loop.
type Primitive interface {
	tview.Primitive

	Mount(ctx context.Context) error
	Update(ctx context.Context) error
	Unmount()
}
|
|
||||||
|
|
||||||
// UI is the root tview primitive: it owns page navigation, the bottom bars
// (info/search/error/help/loading) and the search-filter registry.
type UI struct {
	*tview.Box

	// Need to use context while updating pages those read data from a database.
	// Context should be shared among all mount and updates. Current TUI library
	// doesn't use contexts at all, so I do that feature by myself.
	//nolint:containedctx
	ctx    context.Context
	onStop func()

	app *tview.Application
	db  *bbolt.DB

	// Pages navigated through so far; Escape pops back through them.
	pageHistory []Primitive
	mountedPage Primitive

	// Page scheduled to be mounted on the next draw; nil when none pending.
	pageToMount Primitive

	pageStub tview.Primitive // drawn when no page is mounted

	infoBar    *tview.TextView
	searchBar  *InputFieldWithHistory
	loadingBar *LoadingBar
	helpBar    *tview.TextView

	helpPage *HelpPage // built lazily on first help request

	searchErrorBar *tview.TextView

	// UI modes; InputHandler and draw check them in a fixed priority order.
	isSearching    bool
	isLoading      atomic.Bool
	isShowingError bool
	isShowingHelp  bool

	loadBufferSize int

	rootParser common.Parser

	loadingIndicatorLag time.Duration

	cancelLoading func() // cancels the in-flight load when Escape is pressed

	// Registered search filters (simple and composite) and their help-page
	// hints, keyed by filter tag.
	filters          map[string]func(string) (any, error)
	compositeFilters map[string]func(string) (map[string]any, error)
	filterHints      map[string]string
}
|
|
||||||
|
|
||||||
// NewUI builds the root UI over db. rootParser parses top-level bucket
// keys; cfg may be nil, in which case DefaultConfig is used. The buckets
// view is scheduled for mounting on the first draw.
func NewUI(
	ctx context.Context,
	app *tview.Application,
	db *bbolt.DB,
	rootParser common.Parser,
	cfg *Config,
) *UI {
	// Keep spew from invoking methods on dumped values.
	spew.Config.DisableMethods = true

	if cfg == nil {
		cfg = &DefaultConfig
	}

	ui := &UI{
		Box: tview.NewBox(),

		app:        app,
		db:         db,
		rootParser: rootParser,

		filters:          make(map[string]func(string) (any, error)),
		compositeFilters: make(map[string]func(string) (map[string]any, error)),
		filterHints:      make(map[string]string),

		loadBufferSize:      cfg.LoadBufferSize,
		loadingIndicatorLag: cfg.LoadingIndicatorLag,
	}

	ui.ctx, ui.onStop = context.WithCancel(ctx)

	// Bars use inverted colors so they stand out from regular pages.
	backgroundColor := ui.GetBackgroundColor()
	textColor := tview.Styles.PrimaryTextColor

	inverseBackgroundColor := textColor
	inverseTextColor := backgroundColor

	alertTextColor := tcell.ColorRed

	ui.pageStub = tview.NewBox()

	ui.infoBar = tview.NewTextView()
	ui.infoBar.SetBackgroundColor(inverseBackgroundColor)
	ui.infoBar.SetTextColor(inverseTextColor)
	ui.infoBar.SetText(
		fmt.Sprintf(" %s (press h for help, q to quit) ", db.Path()),
	)

	ui.searchBar = NewInputFieldWithHistory(cfg.SearchHistorySize)
	ui.searchBar.SetFieldBackgroundColor(backgroundColor)
	ui.searchBar.SetFieldTextColor(textColor)
	ui.searchBar.SetLabelColor(textColor)
	ui.searchBar.Focus(nil)
	ui.searchBar.SetLabel("/")

	ui.searchErrorBar = tview.NewTextView()
	ui.searchErrorBar.SetBackgroundColor(backgroundColor)
	ui.searchErrorBar.SetTextColor(alertTextColor)

	ui.helpBar = tview.NewTextView()
	ui.helpBar.SetBackgroundColor(inverseBackgroundColor)
	ui.helpBar.SetTextColor(inverseTextColor)
	ui.helpBar.SetText(" Press Enter for next page or Escape to exit help ")

	ui.loadingBar = NewLoadingBar(ui.triggerDraw)

	// Initial page: all buckets, unfiltered.
	ui.pageToMount = NewBucketsView(ui, NewFilter(nil))

	return ui
}
|
|
||||||
|
|
||||||
func (ui *UI) checkFilterExists(typ string) bool {
|
|
||||||
if _, ok := ui.filters[typ]; ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if _, ok := ui.compositeFilters[typ]; ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ui *UI) AddFilter(
|
|
||||||
typ string,
|
|
||||||
parser func(string) (any, error),
|
|
||||||
helpHint string,
|
|
||||||
) error {
|
|
||||||
if ui.checkFilterExists(typ) {
|
|
||||||
return fmt.Errorf("filter %s already exists", typ)
|
|
||||||
}
|
|
||||||
ui.filters[typ] = parser
|
|
||||||
ui.filterHints[typ] = helpHint
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ui *UI) AddCompositeFilter(
|
|
||||||
typ string,
|
|
||||||
parser func(string) (map[string]any, error),
|
|
||||||
helpHint string,
|
|
||||||
) error {
|
|
||||||
if ui.checkFilterExists(typ) {
|
|
||||||
return fmt.Errorf("filter %s already exists", typ)
|
|
||||||
}
|
|
||||||
ui.compositeFilters[typ] = parser
|
|
||||||
ui.filterHints[typ] = helpHint
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ui *UI) stopOnError(err error) {
|
|
||||||
if err != nil {
|
|
||||||
ui.onStop()
|
|
||||||
ui.app.QueueEvent(tcell.NewEventError(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// stop cancels the shared UI context and terminates the tview application.
func (ui *UI) stop() {
	ui.onStop()
	ui.app.Stop()
}
|
|
||||||
|
|
||||||
func (ui *UI) movePrevPage() {
|
|
||||||
if len(ui.pageHistory) != 0 {
|
|
||||||
ui.mountedPage.Unmount()
|
|
||||||
ui.mountedPage = ui.pageHistory[len(ui.pageHistory)-1]
|
|
||||||
ui.pageHistory = ui.pageHistory[:len(ui.pageHistory)-1]
|
|
||||||
ui.triggerDraw()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// moveNextPage schedules page for mounting on the next draw cycle.
func (ui *UI) moveNextPage(page Primitive) {
	ui.pageToMount = page
	ui.triggerDraw()
}

// triggerDraw asks the application to redraw without blocking the caller.
func (ui *UI) triggerDraw() {
	go ui.app.QueueUpdateDraw(func() {})
}
|
|
||||||
|
|
||||||
// Draw implements tview drawing. At most one background load runs at a
// time: the first draw kicks off load(), subsequent draws while it runs
// only repaint. When the load outlasts loadingIndicatorLag, the loading bar
// is started and Escape is wired to cancel the load.
func (ui *UI) Draw(screen tcell.Screen) {
	if ui.isLoading.Load() {
		ui.draw(screen)
		return
	}

	ui.isLoading.Store(true)

	ctx, cancel := context.WithCancel(ui.ctx)

	ready := make(chan struct{})
	go func() {
		ui.load(ctx)

		cancel()
		close(ready)
		ui.isLoading.Store(false)
	}()

	select {
	case <-ready:
		// Load finished quickly: no loading indicator needed.
	case <-time.After(ui.loadingIndicatorLag):
		ui.loadingBar.Start(ui.ctx)
		ui.cancelLoading = cancel

		// Stop the indicator and repaint once the slow load completes.
		go func() {
			<-ready
			ui.loadingBar.Stop()
			ui.triggerDraw()
		}()
	}

	ui.draw(screen)
}
|
|
||||||
|
|
||||||
func (ui *UI) load(ctx context.Context) {
|
|
||||||
if ui.mountedPage == nil && ui.pageToMount == nil {
|
|
||||||
ui.stop()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if ui.pageToMount != nil {
|
|
||||||
ui.mountAndUpdate(ctx)
|
|
||||||
} else {
|
|
||||||
ui.update(ctx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// draw paints the current page (help page, mounted page, or a stub) plus
// exactly one bottom bar chosen by the active UI mode.
func (ui *UI) draw(screen tcell.Screen) {
	ui.DrawForSubclass(screen, ui)
	x, y, width, height := ui.GetInnerRect()

	var (
		pageToDraw tview.Primitive
		barToDraw  tview.Primitive
	)

	switch {
	case ui.isShowingHelp:
		// Build the help page lazily from the registered filter hints.
		if ui.helpPage == nil {
			var filters []string
			for f := range ui.filters {
				filters = append(filters, f)
			}
			for f := range ui.compositeFilters {
				filters = append(filters, f)
			}
			ui.helpPage = NewHelpPage(filters, ui.filterHints)
		}
		pageToDraw = ui.helpPage
	case ui.mountedPage != nil:
		pageToDraw = ui.mountedPage
	default:
		pageToDraw = ui.pageStub
	}

	// The page gets everything except the bottom line, reserved for the bar.
	pageToDraw.SetRect(x, y, width, height-1)
	pageToDraw.Draw(screen)

	// Search bar uses cursor and we need to hide it when another bar is drawn.
	screen.HideCursor()

	switch {
	case ui.isLoading.Load():
		barToDraw = ui.loadingBar
	case ui.isSearching:
		barToDraw = ui.searchBar
	case ui.isShowingError:
		barToDraw = ui.searchErrorBar
	case ui.isShowingHelp:
		barToDraw = ui.helpBar
	default:
		barToDraw = ui.infoBar
	}

	barToDraw.SetRect(x, y+height-1, width, 1)
	barToDraw.Draw(screen)
}

// mountAndUpdate mounts ui.pageToMount, runs its first Update and, on
// success, makes it the current page (pushing the old one onto history).
// On failure or cancellation the new page is unmounted and discarded.
func (ui *UI) mountAndUpdate(ctx context.Context) {
	defer func() {
		// Operation succeeded or was canceled, either way reset page to mount.
		ui.pageToMount = nil
	}()

	// Mount should use app global context.
	//nolint:contextcheck
	err := ui.pageToMount.Mount(ui.ctx)
	if err != nil {
		ui.stopOnError(err)
		return
	}

	x, y, width, height := ui.GetInnerRect()
	ui.pageToMount.SetRect(x, y, width, height-1)

	s := loadOp(ctx, ui.pageToMount.Update)
	if s.err != nil {
		ui.pageToMount.Unmount()
		ui.stopOnError(s.err)
		return
	}
	// Update was canceled.
	if !s.done {
		ui.pageToMount.Unmount()
		return
	}

	if ui.mountedPage != nil {
		ui.pageHistory = append(ui.pageHistory, ui.mountedPage)
	}
	ui.mountedPage = ui.pageToMount
}

// update refreshes the already-mounted page within the current geometry.
func (ui *UI) update(ctx context.Context) {
	x, y, width, height := ui.GetInnerRect()
	ui.mountedPage.SetRect(x, y, width, height-1)

	s := loadOp(ctx, ui.mountedPage.Update)
	if s.err != nil {
		ui.stopOnError(s.err)
		return
	}
}
|
|
||||||
|
|
||||||
type status struct {
|
|
||||||
done bool
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadOp(ctx context.Context, op func(ctx context.Context) error) status {
|
|
||||||
errCh := make(chan error)
|
|
||||||
go func() {
|
|
||||||
errCh <- op(ctx)
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return status{done: false, err: nil}
|
|
||||||
case err := <-errCh:
|
|
||||||
return status{done: true, err: err}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ui *UI) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
|
|
||||||
return ui.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
|
|
||||||
switch {
|
|
||||||
case ui.isLoading.Load():
|
|
||||||
ui.handleInputOnLoading(event)
|
|
||||||
case ui.isShowingHelp:
|
|
||||||
ui.handleInputOnShowingHelp(event)
|
|
||||||
case ui.isShowingError:
|
|
||||||
ui.handleInputOnShowingError()
|
|
||||||
case ui.isSearching:
|
|
||||||
ui.handleInputOnSearching(event)
|
|
||||||
default:
|
|
||||||
ui.handleInput(event)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleInput processes keys in normal navigation mode: Escape goes back a
// page, h opens help, / opens search, q quits; anything else is forwarded
// to the mounted page.
func (ui *UI) handleInput(event *tcell.EventKey) {
	m, k, r := event.Modifiers(), event.Key(), event.Rune()

	switch {
	case k == tcell.KeyEsc:
		ui.movePrevPage()
	case m == 0 && k == tcell.KeyRune && r == 'h':
		ui.isShowingHelp = true
	case m == 0 && k == tcell.KeyRune && r == '/':
		ui.isSearching = true
	case m == 0 && k == tcell.KeyRune && r == 'q':
		ui.stop()
	default:
		if ui.mountedPage != nil {
			ui.mountedPage.InputHandler()(event, func(tview.Primitive) {})
		}
	}
}
|
|
||||||
|
|
||||||
func (ui *UI) handleInputOnLoading(event *tcell.EventKey) {
|
|
||||||
switch k, r := event.Key(), event.Rune(); {
|
|
||||||
case k == tcell.KeyEsc:
|
|
||||||
ui.cancelLoading()
|
|
||||||
case k == tcell.KeyRune && r == 'q':
|
|
||||||
ui.stop()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ui *UI) handleInputOnShowingError() {
|
|
||||||
ui.isShowingError = false
|
|
||||||
ui.isSearching = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ui *UI) handleInputOnShowingHelp(event *tcell.EventKey) {
|
|
||||||
k, r := event.Key(), event.Rune()
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case k == tcell.KeyEsc:
|
|
||||||
ui.isShowingHelp = false
|
|
||||||
case k == tcell.KeyRune && r == 'q':
|
|
||||||
ui.stop()
|
|
||||||
default:
|
|
||||||
ui.helpPage.InputHandler()(event, func(tview.Primitive) {})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleInputOnSearching processes keys while the search prompt is open.
// Enter parses the prompt and opens a filtered view; Escape (or deleting
// past an empty prompt) leaves search mode; other keys edit the prompt.
func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
	m, k := event.Modifiers(), event.Key()

	switch {
	case k == tcell.KeyEnter:
		prompt := ui.searchBar.GetText()

		res, err := ui.processPrompt(prompt)
		if err != nil {
			// Show the parse error in the bottom bar; search resumes on the
			// next key press (see handleInputOnShowingError).
			ui.isShowingError = true
			ui.isSearching = false
			ui.searchErrorBar.SetText(err.Error() + " (press any key to continue)")
			return
		}

		// Re-open the same kind of view with the new filter applied.
		switch ui.mountedPage.(type) {
		case *BucketsView:
			ui.moveNextPage(NewBucketsView(ui, res))
		case *RecordsView:
			bucket := ui.mountedPage.(*RecordsView).bucket
			ui.moveNextPage(NewRecordsView(ui, bucket, res))
		}

		if ui.searchBar.GetText() != "" {
			ui.searchBar.AddToHistory(ui.searchBar.GetText())
		}

		ui.searchBar.SetText("")
		ui.isSearching = false
	case k == tcell.KeyEsc:
		ui.isSearching = false
	// Backspace / Ctrl-W on an already-empty prompt closes the search bar.
	case (k == tcell.KeyBackspace2 || m&tcell.ModCtrl != 0 && k == tcell.KeyETB) && len(ui.searchBar.GetText()) == 0:
		ui.isSearching = false
	default:
		ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
	}

	// NOTE(review): this only returns the Box's mouse handler and discards
	// it — looks like a no-op leftover; confirm and remove.
	ui.Box.MouseHandler()
}
|
|
||||||
|
|
||||||
func (ui *UI) WithPrompt(prompt string) error {
|
|
||||||
filter, err := ui.processPrompt(prompt)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ui.pageToMount = NewBucketsView(ui, filter)
|
|
||||||
|
|
||||||
if prompt != "" {
|
|
||||||
ui.searchBar.AddToHistory(prompt)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// processPrompt turns a search prompt of the form
// "tag:value [+ tag:value]..." into a Filter. An empty prompt yields an
// empty (match-all) filter. Composite filters expand one tag into several
// entries; duplicate tags from any source are rejected.
func (ui *UI) processPrompt(prompt string) (filter *Filter, err error) {
	if prompt == "" {
		return NewFilter(nil), nil
	}

	filterMap := make(map[string]any)

	for _, filterString := range strings.Split(prompt, "+") {
		parts := strings.Split(filterString, ":")
		if len(parts) != 2 {
			return nil, errors.New("expected 'tag:value [+ tag:value]...'")
		}

		filterTag := strings.TrimSpace(parts[0])
		filterValueString := strings.TrimSpace(parts[1])

		if _, exists := filterMap[filterTag]; exists {
			return nil, fmt.Errorf("duplicate filter tag '%s'", filterTag)
		}

		// Simple filter: one tag, one parsed value.
		parser, ok := ui.filters[filterTag]
		if ok {
			filterValue, err := parser(filterValueString)
			if err != nil {
				return nil, fmt.Errorf("can't parse '%s' filter value: %w", filterTag, err)
			}

			filterMap[filterTag] = filterValue
			continue
		}

		// Composite filter: one tag expands into several tag/value pairs.
		compositeParser, ok := ui.compositeFilters[filterTag]
		if ok {
			compositeFilterValue, err := compositeParser(filterValueString)
			if err != nil {
				return nil, fmt.Errorf(
					"can't parse '%s' filter value '%s': %w",
					filterTag, filterValueString, err,
				)
			}

			for tag, value := range compositeFilterValue {
				if _, exists := filterMap[tag]; exists {
					return nil, fmt.Errorf(
						"found duplicate filter tag '%s' while processing composite filter with tag '%s'",
						tag, filterTag,
					)
				}

				filterMap[tag] = value
			}
			continue
		}

		return nil, fmt.Errorf("unknown filter tag '%s'", filterTag)
	}

	return NewFilter(filterMap), nil
}
|
|
|
@ -1,97 +0,0 @@
|
||||||
package tui
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/mr-tron/base58"
|
|
||||||
)
|
|
||||||
|
|
||||||
func CIDParser(s string) (any, error) {
|
|
||||||
data, err := base58.Decode(s)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var id cid.ID
|
|
||||||
if err = id.Decode(data); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return id, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func OIDParser(s string) (any, error) {
|
|
||||||
data, err := base58.Decode(s)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var id oid.ID
|
|
||||||
if err = id.Decode(data); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return id, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func AddressParser(s string) (map[string]any, error) {
|
|
||||||
m := make(map[string]any)
|
|
||||||
|
|
||||||
parts := strings.Split(s, "/")
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return nil, errors.New("expected <cid>/<oid>")
|
|
||||||
}
|
|
||||||
cnr, err := CIDParser(parts[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
obj, err := OIDParser(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
m["cid"] = cnr
|
|
||||||
m["oid"] = obj
|
|
||||||
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// keyParser validates a non-empty attribute key and returns it unchanged.
func keyParser(s string) (any, error) {
	if len(s) == 0 {
		return nil, errors.New("empty attribute key")
	}
	return s, nil
}

// valueParser validates a non-empty attribute value and returns it unchanged.
func valueParser(s string) (any, error) {
	if len(s) == 0 {
		return nil, errors.New("empty attribute value")
	}
	return s, nil
}

// AttributeParser parses "<key>" or "<key>/<value>" into a composite filter
// map with a "key" entry and an optional "value" entry.
func AttributeParser(s string) (map[string]any, error) {
	parts := strings.Split(s, "/")
	switch len(parts) {
	case 1, 2:
		// Valid shapes: key only, or key/value.
	default:
		return nil, errors.New("expected <key> or <key>/<value>")
	}

	key, err := keyParser(parts[0])
	if err != nil {
		return nil, err
	}
	result := map[string]any{"key": key}

	if len(parts) == 1 {
		return result, nil
	}

	value, err := valueParser(parts[1])
	if err != nil {
		return nil, err
	}
	result["value"] = value

	return result, nil
}
|
|
|
@ -17,5 +17,5 @@ var Root = &cobra.Command{
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
Root.AddCommand(listCMD, inspectCMD, tuiCMD)
|
Root.AddCommand(listCMD, inspectCMD)
|
||||||
}
|
}
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue