forked from TrueCloudLab/frostfs-node
Compare commits
18 commits
f1f3c80dbf ... b4582239bf

b4582239bf
4e244686cf
6cd806f998
3e6fd4c611
5ae4446280
5890cd4d7d
365adb4ebd
bce5827f64
05471d3827
8226d49376
0893689c6a
a4931ea4c7
861e9ab59a
24a540caa8
6226c3ba86
f2250a316f
9929dcf50b
7486c02bbc
53 changed files with 450 additions and 351 deletions
.github/CODEOWNERS (vendored, 1 deletion)

@@ -1 +0,0 @@
-* @TrueCloudLab/storage-core @TrueCloudLab/committers
.github/workflows/changelog.yml (vendored, 29 deletions)

@@ -1,29 +0,0 @@
-name: CHANGELOG check
-
-on:
-  pull_request:
-    branches:
-      - master
-      - support/**
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    name: Check for updates
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Get changed CHANGELOG
-        id: changelog-diff
-        uses: tj-actions/changed-files@v29
-        with:
-          files: CHANGELOG.md
-
-      - name: Fail if changelog not updated
-        if: steps.changelog-diff.outputs.any_changed == 'false'
-        uses: actions/github-script@v3
-        with:
-          script: |
-            core.setFailed('CHANGELOG.md has not been updated')
.github/workflows/config-update.yml (vendored, 37 deletions)

@@ -1,37 +0,0 @@
-name: Configuration check
-
-on:
-  pull_request:
-    branches:
-      - master
-      - support/**
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    name: config-check
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Get changed config-related files
-        id: config-diff
-        uses: tj-actions/changed-files@v29
-        with:
-          files: |
-            config/**
-            cmd/neofs-node/config/**
-
-      - name: Get changed doc files
-        id: docs-diff
-        uses: tj-actions/changed-files@v29
-        with:
-          files: docs/**
-
-      - name: Fail if config files are changed but the documentation is not updated
-        if: steps.config-diff.outputs.any_changed == 'true' && steps.docs-diff.outputs.any_changed == 'false'
-        uses: actions/github-script@v3
-        with:
-          script: |
-            core.setFailed('Documentation has not been updated')
.github/workflows/dco.yml (vendored, 22 deletions)

@@ -1,22 +0,0 @@
-name: DCO check
-
-on:
-  pull_request:
-    branches:
-      - master
-      - support/**
-
-jobs:
-  commits_check_job:
-    runs-on: ubuntu-latest
-    name: Commits Check
-    steps:
-      - name: Get PR Commits
-        id: 'get-pr-commits'
-        uses: tim-actions/get-pr-commits@master
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-      - name: DCO Check
-        uses: tim-actions/dco@master
-        with:
-          commits: ${{ steps.get-pr-commits.outputs.commits }}
.github/workflows/go.yml (vendored, 60 deletions)

@@ -1,60 +0,0 @@
-name: frostfs-node tests
-
-on:
-  push:
-    branches:
-      - master
-      - support/**
-    paths-ignore:
-      - '*.md'
-  pull_request:
-    branches:
-      - master
-      - support/**
-    paths-ignore:
-      - '*.md'
-
-jobs:
-  test:
-    runs-on: ubuntu-20.04
-    strategy:
-      matrix:
-        go: [ '1.18.x', '1.19.x' ]
-    steps:
-      - name: Setup go
-        uses: actions/setup-go@v3
-        with:
-          go-version: ${{ matrix.go }}
-
-      - name: Check out code
-        uses: actions/checkout@v3
-
-      - name: Cache go mod
-        uses: actions/cache@v3
-        with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go-${{ matrix.go }}-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go-${{ matrix.go }}-
-
-      - name: Run go test
-        run: go test -coverprofile=coverage.txt -covermode=atomic ./...
-
-      - name: Codecov
-        env:
-          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
-        run: bash <(curl -s https://codecov.io/bash)
-
-  lint:
-    runs-on: ubuntu-20.04
-    steps:
-      - uses: actions/setup-go@v3
-        with:
-          go-version: 1.19
-      - uses: actions/checkout@v3
-      - name: golangci-lint
-        uses: golangci/golangci-lint-action@v3
-        with:
-          version: v1.50.0
-          args: --timeout=5m
-          only-new-issues: true
.gitlint (new file, 10 additions)

@@ -0,0 +1,10 @@
+[general]
+fail-without-commits=true
+contrib=CC1
+
+[title-match-regex]
+regex=^\[\#[0-9]+\]\s
+
+[ignore-by-title]
+regex=^Release(.*)
+ignore=title-match-regex
@@ -24,6 +24,8 @@ linters-settings:
  govet:
    # report about shadowed variables
    check-shadowing: false
+  staticcheck:
+    checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed.

linters:
  enable:

@@ -53,4 +55,3 @@ linters:
    - whitespace
  disable-all: true
  fast: false
.pre-commit-config.yaml (new file, 36 additions)

@@ -0,0 +1,36 @@
+ci:
+  autofix_prs: false
+
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: check-added-large-files
+      - id: check-case-conflict
+      - id: check-executables-have-shebangs
+      - id: check-shebang-scripts-are-executable
+      - id: check-merge-conflict
+      - id: check-json
+      - id: check-xml
+      - id: check-yaml
+      - id: trailing-whitespace
+        args: [--markdown-linebreak-ext=md]
+      - id: end-of-file-fixer
+        exclude: ".key$"
+
+  - repo: https://github.com/golangci/golangci-lint
+    rev: v1.51.2
+    hooks:
+      - id: golangci-lint
+
+  - repo: https://github.com/jorisroovers/gitlint
+    rev: v0.18.0
+    hooks:
+      - id: gitlint
+        stages: [commit-msg]
+
+  - repo: https://github.com/koalaman/shellcheck-precommit
+    rev: v0.9.0
+    hooks:
+      - id: shellcheck
+        # args: ["--severity=warning"]  # Optionally only show errors and warnings
@@ -13,6 +13,7 @@ Changelog for FrostFS Node
- Reload config for pprof and metrics on SIGHUP in `neofs-node` (#1868)
- Multiple configs support (#44)
- Parameters `nns-name` and `nns-zone` for command `frostfs-cli container create` (#37)
+- Tree service now saves the last synchronization height which persists across restarts (#82)

### Changed
- Change `frostfs_node_engine_container_size` to counting sizes of logical objects

@@ -46,6 +47,8 @@ Changelog for FrostFS Node
- Do not fetch an object if `meta` is missing it (#61)
- Create contract wallet only by `init` and `update-config` command (#63)
- Actually use `object.put.pool_size_local` and independent pool for local puts (#64).
+- Pretty printer of basic ACL in the NeoFS CLI (#2259)
+- Adding of public key for nns group `group.frostfs` at init step (#130)

### Removed
### Updated
Makefile (Normal file → Executable file, 11 lines changed)

@@ -26,7 +26,7 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
    sed "s/-/~/")-${OS_RELEASE}

.PHONY: help all images dep clean fmts fmt imports test lint docker/lint
-    prepare-release debpackage
+    prepare-release debpackage pre-commit unpre-commit

# To build a specific binary, use it's name prefix with bin/ as a target
# For example `make bin/frostfs-node` will build only storage node binary

@@ -140,10 +140,19 @@ docker/lint:
        --env HOME=/src \
        golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'

+# Activate pre-commit hooks
+pre-commit:
+    pre-commit install -t pre-commit -t commit-msg
+
+# Deactivate pre-commit hooks
+unpre-commit:
+    pre-commit uninstall -t pre-commit -t commit-msg
+
# Print version
version:
    @echo $(VERSION)

# Delete built artifacts
clean:
    rm -rf vendor
    rm -rf .cache
@@ -20,7 +20,6 @@ At FrostFS private install deployment, frostfs-adm requires compiled FrostFS
contracts. Find them in the latest release of
[frostfs-contract repository](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases).


## Commands

### Config
@@ -8,6 +8,7 @@ import (
    "text/tabwriter"

    "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
+   morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
    "github.com/nspcc-dev/neo-go/pkg/io"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"

@@ -123,7 +124,7 @@ func dumpCustomZoneHashes(cmd *cobra.Command, nnsHash util.Uint160, zone string,
        return
    }

-   if !bytes.HasSuffix(bs, []byte(zone)) {
+   if !bytes.HasSuffix(bs, []byte(zone)) || bytes.HasPrefix(bs, []byte(morphClient.NNSGroupKeyName)) {
        // Related https://github.com/nspcc-dev/neofs-contract/issues/316.
        return
    }
@@ -82,13 +82,13 @@ func (c *initializeContext) setNNS() error {

func (c *initializeContext) updateNNSGroup(nnsHash util.Uint160, pub *keys.PublicKey) error {
    bw := io.NewBufBinWriter()
-   needUpdate, needRegister, err := c.emitUpdateNNSGroupScript(bw, nnsHash, pub)
-   if !needUpdate || err != nil {
+   keyAlreadyAdded, domainRegCodeEmitted, err := c.emitUpdateNNSGroupScript(bw, nnsHash, pub)
+   if keyAlreadyAdded || err != nil {
        return err
    }

    script := bw.Bytes()
-   if needRegister {
+   if domainRegCodeEmitted {
        w := io.NewBufBinWriter()
        emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1})
        wrapRegisterScriptWithPrice(w, nnsHash, script)

@@ -228,21 +228,28 @@ func nnsResolve(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (stac
}

func nnsResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*keys.PublicKey, error) {
-   item, err := nnsResolve(inv, nnsHash, domain)
+   res, err := nnsResolve(inv, nnsHash, domain)
    if err != nil {
        return nil, err
    }
-   v, ok := item.Value().(stackitem.Null)
-   if ok {
+   if _, ok := res.Value().(stackitem.Null); ok {
        return nil, errors.New("NNS record is missing")
    }
-   bs, err := v.TryBytes()
-   if err != nil {
-       return nil, errors.New("malformed response")
-   }
+   arr, ok := res.Value().([]stackitem.Item)
+   if !ok {
+       return nil, errors.New("API of the NNS contract method `resolve` has changed")
+   }
+   for i := range arr {
+       var bs []byte
+       bs, err = arr[i].TryBytes()
+       if err != nil {
+           continue
+       }

        return keys.NewPublicKeyFromString(string(bs))
+   }
+   return nil, errors.New("no valid keys are found")
}

// parseNNSResolveResult parses the result of resolving NNS record.
// It works with multiple formats (corresponding to multiple NNS versions).
@@ -24,7 +24,7 @@ func PrettyPrintTableBACL(cmd *cobra.Command, bacl *acl.Basic) {
    fmt.Fprintln(w, "\tRangeHASH\tRange\tSearch\tDelete\tPut\tHead\tGet")
    // Bits
    bits := []string{
-       boolToString(bacl.Sticky()) + " " + boolToString(bacl.Extendable()),
+       boolToString(bacl.Sticky()) + " " + boolToString(!bacl.Extendable()),
        getRoleBitsForOperation(bacl, acl.OpObjectHash), getRoleBitsForOperation(bacl, acl.OpObjectRange),
        getRoleBitsForOperation(bacl, acl.OpObjectSearch), getRoleBitsForOperation(bacl, acl.OpObjectDelete),
        getRoleBitsForOperation(bacl, acl.OpObjectPut), getRoleBitsForOperation(bacl, acl.OpObjectHead),

@@ -47,7 +47,7 @@ func getRoleBitsForOperation(bacl *acl.Basic, op acl.Op) string {
    return boolToString(bacl.IsOpAllowed(op, acl.RoleOwner)) + " " +
        boolToString(bacl.IsOpAllowed(op, acl.RoleContainer)) + " " +
        boolToString(bacl.IsOpAllowed(op, acl.RoleOthers)) + " " +
-       boolToString(bacl.IsOpAllowed(op, acl.RoleInnerRing))
+       boolToString(bacl.AllowedBearerRules(op))
}

func boolToString(b bool) string {
debian/frostfs-ir.postinst (vendored, Normal file → Executable file, 6 lines changed)

@@ -28,9 +28,9 @@ case "$1" in
        chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true
        chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true
      fi
-     USERDIR=$(getent passwd "frostfs-$USERNAME" | cut -d: -f6)
-     if ! dpkg-statoverride --list frostfs-$USERDIR >/dev/null; then
-       chown -f frostfs-$USERNAME: $USERDIR
+     USERDIR="$(getent passwd frostfs-$USERNAME | cut -d: -f6)"
+     if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then
+       chown -f frostfs-$USERNAME: "$USERDIR"
      fi
      ;;
debian/frostfs-ir.postrm (vendored, 0 lines changed, Normal file → Executable file)
debian/frostfs-ir.preinst (vendored, 0 lines changed, Normal file → Executable file)
debian/frostfs-ir.prerm (vendored, 0 lines changed, Normal file → Executable file)
debian/frostfs-storage.postinst (vendored, Normal file → Executable file, 6 lines changed)

@@ -28,9 +28,9 @@ case "$1" in
        chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true
        chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true
      fi
-     USERDIR=$(getent passwd "frostfs-$USERNAME" | cut -d: -f6)
-     if ! dpkg-statoverride --list frostfs-$USERDIR >/dev/null; then
-       chown -f frostfs-$USERNAME: $USERDIR
+     USERDIR=$(getent passwd frostfs-$USERNAME | cut -d: -f6)
+     if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then
+       chown -f frostfs-$USERNAME: "$USERDIR"
      fi
      USERDIR=/srv/frostfs
      if ! dpkg-statoverride --list frostfs-$USERDIR >/dev/null; then
debian/frostfs-storage.postrm (vendored, 0 lines changed, Normal file → Executable file)
debian/frostfs-storage.preinst (vendored, 0 lines changed, Normal file → Executable file)
debian/frostfs-storage.prerm (vendored, 0 lines changed, Normal file → Executable file)
@@ -925,7 +925,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
        queueSize: cfg.GetUint32("workers.subnet"),
    })

-   if cfg.GetString("metrics.address") != "" {
+   if cfg.GetString("prometheus.address") != "" {
        m := metrics.NewInnerRingMetrics()
        server.metrics = &m
    }
@@ -167,6 +167,9 @@ func (s *Server) ResetEpochTimer(h uint32) error {

func (s *Server) setHealthStatus(hs control.HealthStatus) {
    s.healthStatus.Store(hs)
+   if s.metrics != nil {
+       s.metrics.SetHealth(int32(hs))
+   }
}

// HealthStatus returns the current health status of the IR application.
@@ -213,6 +213,44 @@ func (e *StorageEngine) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
    return err == nil, err
}

+// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
+func (e *StorageEngine) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
+    index, lst, err := e.getTreeShard(cid, treeID)
+    if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
+        return err
+    }
+
+    err = lst[index].TreeUpdateLastSyncHeight(cid, treeID, height)
+    if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
+        e.reportShardError(lst[index], "can't update tree synchronization height", err,
+            zap.Stringer("cid", cid),
+            zap.String("tree", treeID))
+    }
+    return err
+}
+
+// TreeLastSyncHeight implements the pilorama.Forest interface.
+func (e *StorageEngine) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+    var err error
+    var height uint64
+    for _, sh := range e.sortShardsByWeight(cid) {
+        height, err = sh.TreeLastSyncHeight(cid, treeID)
+        if err != nil {
+            if err == shard.ErrPiloramaDisabled {
+                break
+            }
+            if !errors.Is(err, pilorama.ErrTreeNotFound) {
+                e.reportShardError(sh, "can't read tree synchronization height", err,
+                    zap.Stringer("cid", cid),
+                    zap.String("tree", treeID))
+            }
+            continue
+        }
+        return height, err
+    }
+    return height, err
+}
+
func (e *StorageEngine) getTreeShard(cid cidSDK.ID, treeID string) (int, []hashedShard, error) {
    lst := e.sortShardsByWeight(cid)
    for i, sh := range lst {
@@ -192,6 +192,46 @@ func (t *boltForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
    return exists, err
}

+var syncHeightKey = []byte{'h'}
+
+// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
+func (t *boltForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
+    rawHeight := make([]byte, 8)
+    binary.LittleEndian.PutUint64(rawHeight, height)
+
+    buck := bucketName(cid, treeID)
+    return t.db.Batch(func(tx *bbolt.Tx) error {
+        treeRoot := tx.Bucket(buck)
+        if treeRoot == nil {
+            return ErrTreeNotFound
+        }
+
+        b := treeRoot.Bucket(dataBucket)
+        return b.Put(syncHeightKey, rawHeight)
+    })
+}
+
+// TreeLastSyncHeight implements the pilorama.Forest interface.
+func (t *boltForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+    var height uint64
+
+    buck := bucketName(cid, treeID)
+    err := t.db.View(func(tx *bbolt.Tx) error {
+        treeRoot := tx.Bucket(buck)
+        if treeRoot == nil {
+            return ErrTreeNotFound
+        }
+
+        b := treeRoot.Bucket(dataBucket)
+        data := b.Get(syncHeightKey)
+        if len(data) == 8 {
+            height = binary.LittleEndian.Uint64(data)
+        }
+        return nil
+    })
+    return height, err
+}
+
// TreeAddByPath implements the Forest interface.
func (t *boltForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) {
    if !d.checkValid() {
@@ -226,3 +226,24 @@ func (f *memoryForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
    _, ok := f.treeMap[fullID]
    return ok, nil
}
+
+// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
+func (f *memoryForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
+    fullID := cid.EncodeToString() + "/" + treeID
+    t, ok := f.treeMap[fullID]
+    if !ok {
+        return ErrTreeNotFound
+    }
+    t.syncHeight = height
+    return nil
+}
+
+// TreeLastSyncHeight implements the pilorama.Forest interface.
+func (f *memoryForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+    fullID := cid.EncodeToString() + "/" + treeID
+    t, ok := f.treeMap[fullID]
+    if !ok {
+        return 0, ErrTreeNotFound
+    }
+    return t.syncHeight, nil
+}
@@ -1030,3 +1030,52 @@ func testTreeGetTrees(t *testing.T, s Forest) {
        require.ElementsMatch(t, treeIDs[cid], trees)
    }
}
+
+func TestTreeLastSyncHeight(t *testing.T) {
+    for i := range providers {
+        t.Run(providers[i].name, func(t *testing.T) {
+            testTreeLastSyncHeight(t, providers[i].construct(t))
+        })
+    }
+}
+
+func testTreeLastSyncHeight(t *testing.T, f Forest) {
+    cnr := cidtest.ID()
+    treeID := "someTree"
+
+    t.Run("ErrNotFound if no log operations are stored for a tree", func(t *testing.T) {
+        _, err := f.TreeLastSyncHeight(cnr, treeID)
+        require.ErrorIs(t, err, ErrTreeNotFound)
+
+        err = f.TreeUpdateLastSyncHeight(cnr, treeID, 1)
+        require.ErrorIs(t, err, ErrTreeNotFound)
+    })
+
+    _, err := f.TreeMove(CIDDescriptor{CID: cnr, Size: 1}, treeID, &Move{
+        Parent: RootID,
+        Child:  1,
+    })
+    require.NoError(t, err)
+
+    h, err := f.TreeLastSyncHeight(cnr, treeID)
+    require.NoError(t, err)
+    require.EqualValues(t, 0, h)
+
+    t.Run("separate storages for separate containers", func(t *testing.T) {
+        _, err := f.TreeLastSyncHeight(cidtest.ID(), treeID)
+        require.ErrorIs(t, err, ErrTreeNotFound)
+    })
+
+    require.NoError(t, f.TreeUpdateLastSyncHeight(cnr, treeID, 10))
+
+    h, err = f.TreeLastSyncHeight(cnr, treeID)
+    require.NoError(t, err)
+    require.EqualValues(t, 10, h)
+
+    t.Run("removed correctly", func(t *testing.T) {
+        require.NoError(t, f.TreeDrop(cnr, treeID))
+
+        _, err := f.TreeLastSyncHeight(cnr, treeID)
+        require.ErrorIs(t, err, ErrTreeNotFound)
+    })
+}
@@ -143,6 +143,7 @@ func (s *state) findSpareID() Node {

// tree is a mapping from the child nodes to their parent and metadata.
type tree struct {
+   syncHeight uint64
    infoMap    map[Node]nodeInfo
    childMap   map[Node][]Node
}
@@ -44,6 +44,10 @@ type Forest interface {
    // TreeExists checks if a tree exists locally.
    // If the tree is not found, false and a nil error should be returned.
    TreeExists(cid cidSDK.ID, treeID string) (bool, error)
+   // TreeUpdateLastSyncHeight updates last log height synchronized with _all_ container nodes.
+   TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error
+   // TreeLastSyncHeight returns last log height synchronized with _all_ container nodes.
+   TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error)
}

type ForestStorage interface {
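The two interface methods above are what the engine, shard, bbolt and in-memory backends implement elsewhere in this compare. A minimal sketch of the intended call pattern, using local stand-in types instead of the repository's cidSDK.ID and pilorama imports, and a hypothetical pull callback:

```go
package sketch

import "errors"

// Local stand-ins for the repository types (cidSDK.ID, pilorama.ErrTreeNotFound).
type containerID string

var errTreeNotFound = errors.New("tree not found")

// heightStore mirrors the two methods added to pilorama.Forest above.
type heightStore interface {
	TreeLastSyncHeight(cid containerID, treeID string) (uint64, error)
	TreeUpdateLastSyncHeight(cid containerID, treeID string, height uint64) error
}

// resumeSync sketches the intended usage: read the persisted height, synchronize
// starting from it, then persist the new height if it advanced. pull is a
// hypothetical callback that applies fetched operations and returns the reached height.
func resumeSync(s heightStore, cnr containerID, treeID string, pull func(from uint64) (uint64, error)) error {
	from, err := s.TreeLastSyncHeight(cnr, treeID)
	if err != nil && !errors.Is(err, errTreeNotFound) {
		return err
	}
	reached, err := pull(from)
	if err != nil {
		return err
	}
	if reached > from {
		return s.TreeUpdateLastSyncHeight(cnr, treeID, reached)
	}
	return nil
}
```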
@@ -111,3 +111,19 @@ func (s *Shard) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
    }
    return s.pilorama.TreeExists(cid, treeID)
}
+
+// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
+func (s *Shard) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
+    if s.pilorama == nil {
+        return ErrPiloramaDisabled
+    }
+    return s.pilorama.TreeUpdateLastSyncHeight(cid, treeID, height)
+}
+
+// TreeLastSyncHeight implements the pilorama.Forest interface.
+func (s *Shard) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+    if s.pilorama == nil {
+        return 0, ErrPiloramaDisabled
+    }
+    return s.pilorama.TreeLastSyncHeight(cid, treeID)
+}
@@ -2,11 +2,12 @@ package metrics

import "github.com/prometheus/client_golang/prometheus"

-const innerRingSubsystem = "object"
+const innerRingSubsystem = "ir"

// InnerRingServiceMetrics contains metrics collected by inner ring.
type InnerRingServiceMetrics struct {
    epoch  prometheus.Gauge
+   health prometheus.Gauge
}

// NewInnerRingMetrics returns new instance of metrics collectors for inner ring.

@@ -18,12 +19,20 @@ func NewInnerRingMetrics() InnerRingServiceMetrics {
            Name:      "epoch",
            Help:      "Current epoch as seen by inner-ring node.",
        })
+       health = prometheus.NewGauge(prometheus.GaugeOpts{
+           Namespace: namespace,
+           Subsystem: innerRingSubsystem,
+           Name:      "health",
+           Help:      "Current inner-ring node state.",
+       })
    )

    prometheus.MustRegister(epoch)
+   prometheus.MustRegister(health)

    return InnerRingServiceMetrics{
        epoch:  epoch,
+       health: health,
    }
}

@@ -31,3 +40,8 @@ func NewInnerRingMetrics() InnerRingServiceMetrics {
func (m InnerRingServiceMetrics) SetEpoch(epoch uint64) {
    m.epoch.Set(float64(epoch))
}
+
+// SetHealth updates health metrics.
+func (m InnerRingServiceMetrics) SetHealth(s int32) {
+    m.health.Set(float64(s))
+}
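Combined with the `prometheus.address` check earlier in this compare, the new health gauge ends up on the inner-ring metrics endpoint. A self-contained sketch of registering and serving such a gauge with the Prometheus Go client; the `frostfs` namespace and the listen address are assumptions for illustration, not values taken from the diff:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Roughly what NewInnerRingMetrics builds for the new gauge; the "frostfs"
	// namespace is an assumption, the real constant is not shown in this diff.
	health := prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "frostfs",
		Subsystem: "ir",
		Name:      "health",
		Help:      "Current inner-ring node state.",
	})
	prometheus.MustRegister(health)

	// setHealthStatus stores a control.HealthStatus and mirrors it here as an int32.
	health.Set(1)

	// The node serves this on the address from the `prometheus.address` config key.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe("127.0.0.1:9090", nil) // illustrative address
}
```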
@@ -127,7 +127,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
        p.processNodes(c, addrWithType, nn[i], policy.ReplicaNumberByIndex(i), checkedNodes)
    }

-   if !c.needLocalCopy {
+   if !c.needLocalCopy && c.removeLocalCopy {
        p.log.Info("redundant local object copy detected",
            zap.Stringer("object", addr),
        )

@@ -139,7 +139,11 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
type processPlacementContext struct {
    context.Context

+   // needLocalCopy is true if the current node must store an object according to the storage policy.
    needLocalCopy bool
+   // removeLocalCopy is true if all copies are stored according to the storage policy
+   // and the current node doesn't need to store an object.
+   removeLocalCopy bool
}

func (p *Policer) processNodes(ctx *processPlacementContext, addrWithType objectcore.AddressWithType,

@@ -242,9 +246,11 @@ func (p *Policer) processNodes(ctx *processPlacementContext, addrWithType object
    } else if uncheckedCopies > 0 {
        // If we have more copies than needed, but some of them are from the maintenance nodes,
        // save the local copy.
-       ctx.needLocalCopy = true
        p.log.Debug("some of the copies are stored on nodes under maintenance, save local copy",
            zap.Int("count", uncheckedCopies))
+   } else if uncheckedCopies == 0 {
+       // Safe to remove: checked all copies, shortage == 0.
+       ctx.removeLocalCopy = true
    }
}
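Taken together, the three policer hunks implement one small rule; a hedged restatement with hypothetical names rather than the actual Policer fields:

```go
package sketch

// shouldRemoveLocalCopy restates the rule above: the redundant local copy is dropped
// only when the storage policy does not require a copy on this node, no replicas are
// missing (shortage == 0), and no replica was skipped because its node is under
// maintenance (uncheckedCopies == 0). Names are hypothetical, not the Policer's fields.
func shouldRemoveLocalCopy(needLocalCopy bool, shortage, uncheckedCopies int) bool {
	return !needLocalCopy && shortage == 0 && uncheckedCopies == 0
}
```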
@@ -31,10 +31,8 @@ type Service struct {
    syncChan chan struct{}
    syncPool *ants.Pool

-   // cnrMap maps contrainer and tree ID to the minimum height which was fetched from _each_ client.
-   // This allows us to better handle split-brain scenario, because we always synchronize
-   // from the last seen height. The inner map is read-only and should not be modified in-place.
-   cnrMap map[cidSDK.ID]map[string]uint64
+   // cnrMap contains existing (used) container IDs.
+   cnrMap map[cidSDK.ID]struct{}
    // cnrMapMtx protects cnrMap
    cnrMapMtx sync.Mutex
}

@@ -63,7 +61,7 @@ func New(opts ...Option) *Service {
    s.replicateLocalCh = make(chan applyOp)
    s.replicationTasks = make(chan replicationTask, s.replicatorWorkerCount)
    s.containerCache.init(s.containerCacheSize)
-   s.cnrMap = make(map[cidSDK.ID]map[string]uint64)
+   s.cnrMap = make(map[cidSDK.ID]struct{})
    s.syncChan = make(chan struct{})
    s.syncPool, _ = ants.NewPool(defaultSyncWorkerCount)
@@ -86,30 +86,23 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
        return fmt.Errorf("could not fetch tree ID list: %w", outErr)
    }

-   s.cnrMapMtx.Lock()
-   oldStatus := s.cnrMap[cid]
-   s.cnrMapMtx.Unlock()
-
-   syncStatus := map[string]uint64{}
-   for i := range treesToSync {
-       syncStatus[treesToSync[i]] = 0
-   }
-   for tid := range oldStatus {
-       if _, ok := syncStatus[tid]; ok {
-           syncStatus[tid] = oldStatus[tid]
-       }
-   }
-
    for _, tid := range treesToSync {
-       h := s.synchronizeTree(ctx, d, syncStatus[tid], tid, nodes)
-       if syncStatus[tid] < h {
-           syncStatus[tid] = h
+       h, err := s.forest.TreeLastSyncHeight(d.CID, tid)
+       if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
+           s.log.Warn("could not get last synchronized height for a tree",
+               zap.Stringer("cid", d.CID),
+               zap.String("tree", tid))
+           continue
+       }
+       newHeight := s.synchronizeTree(ctx, d, h, tid, nodes)
+       if h < newHeight {
+           if err := s.forest.TreeUpdateLastSyncHeight(d.CID, tid, newHeight); err != nil {
+               s.log.Warn("could not update last synchronized height for a tree",
+                   zap.Stringer("cid", d.CID),
+                   zap.String("tree", tid))
+           }
        }
    }

-   s.cnrMapMtx.Lock()
-   s.cnrMap[cid] = syncStatus
-   s.cnrMapMtx.Unlock()
-
    return nil
}

@@ -288,7 +281,6 @@ func (s *Service) syncLoop(ctx context.Context) {
            s.log.Error("could not calculate container nodes",
                zap.Stringer("cid", cnr),
                zap.Error(err))
-           removed = append(removed, cnr)
            continue
        }

@@ -43,7 +43,7 @@ func Command(name string) *cobra.Command {
            name, name, name, name, name, name, name, name, name, name),
        DisableFlagsInUseLine: true,
        ValidArgs:             []string{"bash", "zsh", "fish", "powershell"},
-       Args:                  cobra.ExactValidArgs(1),
+       Args:                  cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
        Run: func(cmd *cobra.Command, args []string) {
            switch args[0] {
            case "bash":
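`cobra.ExactValidArgs` was deprecated upstream, and `cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs)` is its documented equivalent. A self-contained sketch of the same validator composition; the command wiring here is illustrative, not the repository's:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:       "completion [bash|zsh|fish|powershell]",
		ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
		// Exactly one argument, and it must be one of ValidArgs.
		Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("would generate completion for", args[0])
		},
	}
	_ = cmd.Execute()
}
```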