forked from TrueCloudLab/frostfs-node

[#1132] *: Use path/filepath package when working with files

Signed-off-by: Evgenii Stratonikov <evgeniy@nspcc.ru>
parent 0decb95591
commit 674f520da7
20 changed files with 73 additions and 77 deletions
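For context (not stated in the commit message itself): the standard library's path package always operates on forward-slash-separated, URL-like paths, while path/filepath uses the separator of the host operating system and understands Windows volume names, which is what code touching the real file system needs. A minimal standalone sketch, separate from the diff below, of the observable difference:

	package main

	import (
		"fmt"
		"path"
		"path/filepath"
	)

	func main() {
		// On Unix-like systems both lines print "a/b/config.yml".
		// On Windows, filepath.Join prints "a\b\config.yml" (the separator the
		// OS expects), while path.Join keeps forward slashes regardless of OS.
		fmt.Println(path.Join("a", "b", "config.yml"))
		fmt.Println(filepath.Join("a", "b", "config.yml"))
	}

The diff itself is mechanical: every path.Join, path.Dir, path.Base and path.Split call applied to file-system paths becomes its filepath counterpart, and "path" imports that are no longer needed are dropped.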
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"fmt"
 	"os"
-	"path"
 	"path/filepath"
 	"text/template"
 

@@ -51,7 +50,7 @@ func initConfig(cmd *cobra.Command, args []string) error {
 		return nil
 	}
 
-	pathDir := path.Dir(configPath)
+	pathDir := filepath.Dir(configPath)
 	err = os.MkdirAll(pathDir, 0700)
 	if err != nil {
 		return fmt.Errorf("create dir %s: %w", pathDir, err)

@@ -97,7 +96,7 @@ func defaultConfigPath() (string, error) {
 		return "", fmt.Errorf("getting home dir path: %w", err)
 	}
 
-	return path.Join(home, ".neofs", "adm", "config.yml"), nil
+	return filepath.Join(home, ".neofs", "adm", "config.yml"), nil
 }
 
 // generateConfigExample builds .yml representation of the config file. It is

@@ -122,7 +121,7 @@ func generateConfigExample(appDir string, credSize int) (string, error) {
 	if err != nil {
 		return "", fmt.Errorf("making absolute path for %s: %w", appDir, err)
 	}
-	tmpl.AlphabetDir = path.Join(appDir, "alphabet-wallets")
+	tmpl.AlphabetDir = filepath.Join(appDir, "alphabet-wallets")
 
 	var i innerring.GlagoliticLetter
 	for i = 0; i < innerring.GlagoliticLetter(credSize); i++ {

@@ -2,7 +2,7 @@ package config
 
 import (
 	"bytes"
-	"path"
+	"path/filepath"
 	"testing"
 
 	"github.com/nspcc-dev/neofs-node/pkg/innerring"

@@ -25,7 +25,7 @@ func TestGenerateConfigExample(t *testing.T) {
 	require.NoError(t, v.ReadConfig(bytes.NewBufferString(configText)))
 
 	require.Equal(t, "https://neo.rpc.node:30333", v.GetString("rpc-endpoint"))
-	require.Equal(t, path.Join(appDir, "alphabet-wallets"), v.GetString("alphabet-wallets"))
+	require.Equal(t, filepath.Join(appDir, "alphabet-wallets"), v.GetString("alphabet-wallets"))
 	require.Equal(t, 67108864, v.GetInt("network.max_object_size"))
 	require.Equal(t, 240, v.GetInt("network.epoch_duration"))
 	require.Equal(t, 100000000, v.GetInt("network.basic_income_rate"))

@@ -3,7 +3,7 @@ package morph
 import (
 	"errors"
 	"fmt"
-	"path"
+	"path/filepath"
 
 	"github.com/nspcc-dev/neo-go/cli/input"
 	"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"

@@ -62,7 +62,7 @@ func initializeWallets(walletDir string, size int) ([]string, error) {
 			return nil, fmt.Errorf("can't fetch password: %w", err)
 		}
 
-		p := path.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
+		p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
 		// TODO(@fyrchik): file is created with 0666 permissions, consider changing.
 		w, err := wallet.NewWallet(p)
 		if err != nil {

@@ -5,7 +5,7 @@ import (
 	"io/ioutil"
 	"math/rand"
 	"os"
-	"path"
+	"path/filepath"
 	"strconv"
 	"testing"
 

@@ -42,7 +42,7 @@ func TestGenerateAlphabet(t *testing.T) {
 	})
 	t.Run("missing directory", func(t *testing.T) {
 		buf.Reset()
-		dir := path.Join(os.TempDir(), "notexist."+strconv.FormatUint(rand.Uint64(), 10))
+		dir := filepath.Join(os.TempDir(), "notexist."+strconv.FormatUint(rand.Uint64(), 10))
 		v.Set(alphabetWalletsFlag, dir)
 		require.NoError(t, cmd.Flags().Set(alphabetSizeFlag, "1"))
 		buf.WriteString("pass\r")

@@ -59,7 +59,7 @@ func TestGenerateAlphabet(t *testing.T) {
 	require.NoError(t, generateAlphabetCreds(cmd, nil))
 
 	for i := uint64(0); i < size; i++ {
-		p := path.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
+		p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
 		w, err := wallet.NewWalletFromFile(p)
 		require.NoError(t, err, "wallet doesn't exist")
 		require.Equal(t, 3, len(w.Accounts), "not all accounts were created")

@@ -91,7 +91,7 @@ func setupTestTerminal(t *testing.T) *bytes.Buffer {
 }
 
 func newTempDir(t *testing.T) string {
-	dir := path.Join(os.TempDir(), "neofs-adm.test."+strconv.FormatUint(rand.Uint64(), 10))
+	dir := filepath.Join(os.TempDir(), "neofs-adm.test."+strconv.FormatUint(rand.Uint64(), 10))
 	require.NoError(t, os.Mkdir(dir, os.ModePerm))
 	t.Cleanup(func() {
 		require.NoError(t, os.RemoveAll(dir))

@@ -4,7 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"io/ioutil"
-	"path"
+	"path/filepath"
 	"strings"
 	"time"
 

@@ -192,7 +192,7 @@ loop:
 
 	wallets := make([]*wallet.Wallet, size)
 	for i := 0; i < size; i++ {
-		p := path.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
+		p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
 		w, err := wallet.NewWalletFromFile(p)
 		if err != nil {
 			return nil, fmt.Errorf("can't open wallet: %w", err)

@@ -9,7 +9,7 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
-	"path"
+	"path/filepath"
 	"strings"
 
 	"github.com/nspcc-dev/neo-go/pkg/core/native"

@@ -370,11 +370,11 @@ func (c *initializeContext) readContracts(names []string) error {
 	if c.ContractPath != "" && fi.IsDir() {
 		for _, ctrName := range names {
 			cs := new(contractState)
-			cs.RawNEF, err = ioutil.ReadFile(path.Join(c.ContractPath, ctrName, ctrName+"_contract.nef"))
+			cs.RawNEF, err = ioutil.ReadFile(filepath.Join(c.ContractPath, ctrName, ctrName+"_contract.nef"))
 			if err != nil {
 				return fmt.Errorf("can't read NEF file for %s contract: %w", ctrName, err)
 			}
-			cs.RawManifest, err = ioutil.ReadFile(path.Join(c.ContractPath, ctrName, "config.json"))
+			cs.RawManifest, err = ioutil.ReadFile(filepath.Join(c.ContractPath, ctrName, "config.json"))
 			if err != nil {
 				return fmt.Errorf("can't read manifest file for %s contract: %w", ctrName, err)
 			}

@@ -449,8 +449,8 @@ func readContractsFromArchive(file io.Reader, names []string) (map[string]*contr
 			break
 		}
 
-		dir, _ := path.Split(h.Name)
-		ctrName := path.Base(dir)
+		dir, _ := filepath.Split(h.Name)
+		ctrName := filepath.Base(dir)
 
 		cs, ok := m[ctrName]
 		if !ok {

@@ -458,7 +458,7 @@ func readContractsFromArchive(file io.Reader, names []string) (map[string]*contr
 		}
 
 		switch {
-		case strings.HasSuffix(h.Name, path.Join(ctrName, ctrName+"_contract.nef")):
+		case strings.HasSuffix(h.Name, filepath.Join(ctrName, ctrName+"_contract.nef")):
 			cs.RawNEF, err = ioutil.ReadAll(r)
 			if err != nil {
 				return nil, fmt.Errorf("can't read NEF file for %s contract: %w", ctrName, err)

@@ -5,7 +5,6 @@ import (
 	"errors"
 	"io/ioutil"
 	"os"
-	"path"
 	"path/filepath"
 	"testing"
 

@@ -20,7 +19,7 @@ import (
 func Test_getKey(t *testing.T) {
 	dir := t.TempDir()
 
-	wallPath := path.Join(dir, "wallet.json")
+	wallPath := filepath.Join(dir, "wallet.json")
 	w, err := wallet.NewWallet(wallPath)
 	require.NoError(t, err)
 

@@ -37,7 +36,7 @@ func Test_getKey(t *testing.T) {
 	require.NoError(t, w.Save())
 	w.Close()
 
-	keyPath := path.Join(dir, "binary.key")
+	keyPath := filepath.Join(dir, "binary.key")
 	rawKey, err := keys.NewPrivateKey()
 	require.NoError(t, err)
 	require.NoError(t, ioutil.WriteFile(keyPath, rawKey.Bytes(), os.ModePerm))

@@ -3,7 +3,7 @@ package main
 import (
 	"context"
 	"net"
-	"path"
+	"path/filepath"
 	"sync"
 	"time"
 

@@ -390,7 +390,7 @@ func initShardOptions(c *cfg) {
 
 		metaPath := metabaseCfg.Path()
 		metaPerm := metabaseCfg.Perm()
-		fatalOnErr(util.MkdirAllX(path.Dir(metaPath), metaPerm))
+		fatalOnErr(util.MkdirAllX(filepath.Dir(metaPath), metaPerm))
 
 		opts = append(opts, []shard.Option{
 			shard.WithLogger(c.log),

@@ -2,7 +2,7 @@ package blobovnicza
 
 import (
 	"fmt"
-	"path"
+	"path/filepath"
 
 	"github.com/nspcc-dev/neofs-node/pkg/util"
 	"go.etcd.io/bbolt"

@@ -21,7 +21,7 @@ func (b *Blobovnicza) Open() error {
 	var err error
 
 	if !b.boltOptions.ReadOnly {
-		err = util.MkdirAllX(path.Dir(b.path), b.perm)
+		err = util.MkdirAllX(filepath.Dir(b.path), b.perm)
 		if err != nil {
 			return err
 		}

@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"path"
+	"path/filepath"
 	"strconv"
 	"sync"
 

@@ -88,7 +88,7 @@ var errPutFailed = errors.New("could not save the object in any blobovnicza")
 
 func newBlobovniczaTree(c *cfg) (blz *blobovniczas) {
 	cache, err := simplelru.NewLRU(c.openedCacheSize, func(key interface{}, value interface{}) {
-		if _, ok := blz.active[path.Dir(key.(string))]; ok {
+		if _, ok := blz.active[filepath.Dir(key.(string))]; ok {
 			return
 		} else if err := value.(*blobovnicza.Blobovnicza).Close(); err != nil {
 			c.log.Error("could not close Blobovnicza",

@@ -156,7 +156,7 @@ func (b *blobovniczas) put(addr *objectSDK.Address, data []byte) (*blobovnicza.I
 			// check if blobovnicza is full
 			if errors.Is(err, blobovnicza.ErrFull) {
 				b.log.Debug("blobovnicza overflowed",
-					zap.String("path", path.Join(p, u64ToHexString(active.ind))),
+					zap.String("path", filepath.Join(p, u64ToHexString(active.ind))),
 				)
 
 				if err := b.updateActive(p, &active.ind); err != nil {

@@ -172,14 +172,14 @@ func (b *blobovniczas) put(addr *objectSDK.Address, data []byte) (*blobovnicza.I
 			}
 
 			b.log.Debug("could not put object to active blobovnicza",
-				zap.String("path", path.Join(p, u64ToHexString(active.ind))),
+				zap.String("path", filepath.Join(p, u64ToHexString(active.ind))),
 				zap.String("error", err.Error()),
 			)
 
 			return false, nil
 		}
 
-		p = path.Join(p, u64ToHexString(active.ind))
+		p = filepath.Join(p, u64ToHexString(active.ind))
 
 		id = blobovnicza.NewIDFromBytes([]byte(p))
 

@@ -217,7 +217,7 @@ func (b *blobovniczas) get(prm *GetSmallPrm) (res *GetSmallRes, err error) {
 	activeCache := make(map[string]struct{})
 
 	err = b.iterateSortedLeaves(prm.addr, func(p string) (bool, error) {
-		dirPath := path.Dir(p)
+		dirPath := filepath.Dir(p)
 
 		_, ok := activeCache[dirPath]
 

@@ -267,7 +267,7 @@ func (b *blobovniczas) delete(prm *DeleteSmallPrm) (res *DeleteSmallRes, err err
 	activeCache := make(map[string]struct{})
 
 	err = b.iterateSortedLeaves(prm.addr, func(p string) (bool, error) {
-		dirPath := path.Dir(p)
+		dirPath := filepath.Dir(p)
 
 		// don't process active blobovnicza of the level twice
 		_, ok := activeCache[dirPath]

@@ -319,7 +319,7 @@ func (b *blobovniczas) getRange(prm *GetRangeSmallPrm) (res *GetRangeSmallRes, e
 	activeCache := make(map[string]struct{})
 
 	err = b.iterateSortedLeaves(prm.addr, func(p string) (bool, error) {
-		dirPath := path.Dir(p)
+		dirPath := filepath.Dir(p)
 
 		_, ok := activeCache[dirPath]
 

@@ -351,7 +351,7 @@ func (b *blobovniczas) getRange(prm *GetRangeSmallPrm) (res *GetRangeSmallRes, e
 //
 // returns no error if object was removed from some blobovnicza of the same level.
 func (b *blobovniczas) deleteObjectFromLevel(prm *blobovnicza.DeletePrm, blzPath string, tryActive bool, dp *DeleteSmallPrm) (*DeleteSmallRes, error) {
-	lvlPath := path.Dir(blzPath)
+	lvlPath := filepath.Dir(blzPath)
 
 	log := b.log.With(
 		zap.String("path", blzPath),

@@ -394,7 +394,7 @@ func (b *blobovniczas) deleteObjectFromLevel(prm *blobovnicza.DeletePrm, blzPath
 		// check if it makes sense to try to open the blob
 		// (blobovniczas "after" the active one are empty anyway,
 		// and it's pointless to open them).
-		if u64FromHexString(path.Base(blzPath)) > active.ind {
+		if u64FromHexString(filepath.Base(blzPath)) > active.ind {
 			log.Debug("index is too big")
 			return nil, object.ErrNotFound
 		}

@@ -412,7 +412,7 @@ func (b *blobovniczas) deleteObjectFromLevel(prm *blobovnicza.DeletePrm, blzPath
 //
 // returns error if object could not be read from any blobovnicza of the same level.
 func (b *blobovniczas) getObjectFromLevel(prm *blobovnicza.GetPrm, blzPath string, tryActive bool) (*GetSmallRes, error) {
-	lvlPath := path.Dir(blzPath)
+	lvlPath := filepath.Dir(blzPath)
 
 	log := b.log.With(
 		zap.String("path", blzPath),

@@ -456,7 +456,7 @@ func (b *blobovniczas) getObjectFromLevel(prm *blobovnicza.GetPrm, blzPath strin
 		// check if it makes sense to try to open the blob
 		// (blobovniczas "after" the active one are empty anyway,
 		// and it's pointless to open them).
-		if u64FromHexString(path.Base(blzPath)) > active.ind {
+		if u64FromHexString(filepath.Base(blzPath)) > active.ind {
 			log.Debug("index is too big")
 			return nil, object.ErrNotFound
 		}

@@ -474,7 +474,7 @@ func (b *blobovniczas) getObjectFromLevel(prm *blobovnicza.GetPrm, blzPath strin
 //
 // returns error if object could not be read from any blobovnicza of the same level.
 func (b *blobovniczas) getRangeFromLevel(prm *GetRangeSmallPrm, blzPath string, tryActive bool) (*GetRangeSmallRes, error) {
-	lvlPath := path.Dir(blzPath)
+	lvlPath := filepath.Dir(blzPath)
 
 	log := b.log.With(
 		zap.String("path", blzPath),

@@ -528,7 +528,7 @@ func (b *blobovniczas) getRangeFromLevel(prm *GetRangeSmallPrm, blzPath string,
 		// check if it makes sense to try to open the blob
 		// (blobovniczas "after" the active one are empty anyway,
 		// and it's pointless to open them).
-		if u64FromHexString(path.Base(blzPath)) > active.ind {
+		if u64FromHexString(filepath.Base(blzPath)) > active.ind {
 			log.Debug("index is too big")
 			return nil, object.ErrNotFound
 		}

@@ -653,7 +653,7 @@ func (b *blobovniczas) iterateSortedLeaves(addr *objectSDK.Address, f func(strin
 		addr,
 		make([]string, 0, b.blzShallowDepth),
 		b.blzShallowDepth,
-		func(p []string) (bool, error) { return f(path.Join(p...)) },
+		func(p []string) (bool, error) { return f(filepath.Join(p...)) },
 	)
 
 	return err

@@ -670,7 +670,7 @@ func (b *blobovniczas) iterateDeepest(addr *objectSDK.Address, f func(string) (b
 		addr,
 		make([]string, 0, depth),
 		depth,
-		func(p []string) (bool, error) { return f(path.Join(p...)) },
+		func(p []string) (bool, error) { return f(filepath.Join(p...)) },
 	)
 
 	return err

@@ -680,7 +680,7 @@ func (b *blobovniczas) iterateDeepest(addr *objectSDK.Address, f func(string) (b
 func (b *blobovniczas) iterateSorted(addr *objectSDK.Address, curPath []string, execDepth uint64, f func([]string) (bool, error)) (bool, error) {
 	indices := indexSlice(b.blzShallowWidth)
 
-	hrw.SortSliceByValue(indices, addressHash(addr, path.Join(curPath...)))
+	hrw.SortSliceByValue(indices, addressHash(addr, filepath.Join(curPath...)))
 
 	exec := uint64(len(curPath)) == execDepth
 

@@ -754,7 +754,7 @@ func (b *blobovniczas) updateAndGet(p string, old *uint64) (blobovniczaWithIndex
 	}
 
 	var err error
-	if active.blz, err = b.openBlobovnicza(path.Join(p, u64ToHexString(active.ind))); err != nil {
+	if active.blz, err = b.openBlobovnicza(filepath.Join(p, u64ToHexString(active.ind))); err != nil {
 		return active, err
 	}
 

@@ -773,7 +773,7 @@ func (b *blobovniczas) updateAndGet(p string, old *uint64) (blobovniczaWithIndex
 	b.active[p] = active
 
 	b.log.Debug("blobovnicza successfully activated",
-		zap.String("path", path.Join(p, u64ToHexString(active.ind))),
+		zap.String("path", filepath.Join(p, u64ToHexString(active.ind))),
 	)
 
 	return active, nil

@@ -887,7 +887,7 @@ func (b *blobovniczas) openBlobovnicza(p string) (*blobovnicza.Blobovnicza, erro
 	}
 
 	blz := blobovnicza.New(append(b.blzOpts,
-		blobovnicza.WithPath(path.Join(b.blzRootPath, p)),
+		blobovnicza.WithPath(filepath.Join(b.blzRootPath, p)),
 	)...)
 
 	if err := blz.Open(); err != nil {

@@ -3,7 +3,7 @@ package blobstor
 import (
 	"encoding/hex"
 	"io/fs"
-	"path"
+	"path/filepath"
 
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobovnicza"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"

@@ -132,7 +132,7 @@ func WithUncompressableContentTypes(values []string) Option {
 func WithRootPath(rootDir string) Option {
 	return func(c *cfg) {
 		c.fsTree.RootPath = rootDir
-		c.blzRootPath = path.Join(rootDir, blobovniczaDir)
+		c.blzRootPath = filepath.Join(rootDir, blobovniczaDir)
 	}
 }
 

@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"io/fs"
 	"os"
-	"path"
 	"path/filepath"
 	"strings"
 

@@ -94,7 +93,7 @@ func (t *FSTree) Iterate(prm *IterationPrm) error {
 
 func (t *FSTree) iterate(depth int, curPath []string, prm *IterationPrm) error {
 	curName := strings.Join(curPath[1:], "")
-	des, err := os.ReadDir(path.Join(curPath...))
+	des, err := os.ReadDir(filepath.Join(curPath...))
 	if err != nil {
 		if prm.ignoreErrors {
 			return nil

@@ -127,7 +126,7 @@ func (t *FSTree) iterate(depth int, curPath []string, prm *IterationPrm) error {
 			continue
 		}
 
-		data, err := os.ReadFile(path.Join(curPath...))
+		data, err := os.ReadFile(filepath.Join(curPath...))
 		if err != nil {
 			if prm.ignoreErrors {
 				continue

@@ -157,7 +156,7 @@ func (t *FSTree) treePath(addr *objectSDK.Address) string {
 
 	dirs = append(dirs, sAddr)
 
-	return path.Join(dirs...)
+	return filepath.Join(dirs...)
 }
 
 // Delete removes object with the specified address from storage.

@@ -187,7 +186,7 @@ func (t *FSTree) Exists(addr *objectSDK.Address) (string, error) {
 func (t *FSTree) Put(addr *objectSDK.Address, data []byte) error {
 	p := t.treePath(addr)
 
-	if err := util.MkdirAllX(path.Dir(p), t.Permissions); err != nil {
+	if err := util.MkdirAllX(filepath.Dir(p), t.Permissions); err != nil {
 		return err
 	}
 

@@ -5,7 +5,6 @@ import (
 	"crypto/sha256"
 	"errors"
 	"os"
-	"path"
 	"path/filepath"
 	"testing"
 

@@ -43,7 +42,7 @@ func TestAddressToString(t *testing.T) {
 }
 
 func TestFSTree(t *testing.T) {
-	tmpDir := path.Join(os.TempDir(), "neofs.fstree.test")
+	tmpDir := filepath.Join(os.TempDir(), "neofs.fstree.test")
 	require.NoError(t, os.Mkdir(tmpDir, os.ModePerm))
 	t.Cleanup(func() { require.NoError(t, os.RemoveAll(tmpDir)) })
 

@@ -4,7 +4,7 @@ import (
 	"crypto/sha256"
 	"fmt"
 	"math/rand"
-	"path"
+	"path/filepath"
 	"sync"
 	"testing"
 

@@ -57,13 +57,13 @@ func testNewShard(t *testing.T, id int) *shard.Shard {
 		shard.WithID(sid),
 		shard.WithLogger(zap.L()),
 		shard.WithBlobStorOptions(
-			blobstor.WithRootPath(path.Join(t.Name(), fmt.Sprintf("%d.blobstor", id))),
+			blobstor.WithRootPath(filepath.Join(t.Name(), fmt.Sprintf("%d.blobstor", id))),
 			blobstor.WithBlobovniczaShallowWidth(2),
 			blobstor.WithBlobovniczaShallowDepth(2),
 			blobstor.WithRootPerm(0700),
 		),
 		shard.WithMetaBaseOptions(
-			meta.WithPath(path.Join(t.Name(), fmt.Sprintf("%d.metabase", id))),
+			meta.WithPath(filepath.Join(t.Name(), fmt.Sprintf("%d.metabase", id))),
 			meta.WithPermissions(0700),
 		))
 

@@ -2,7 +2,7 @@ package meta
 
 import (
 	"fmt"
-	"path"
+	"path/filepath"
 
 	"github.com/nspcc-dev/neofs-node/pkg/util"
 	"go.etcd.io/bbolt"

@@ -11,7 +11,7 @@ import (
 
 // Open boltDB instance for metabase.
 func (db *DB) Open() error {
-	err := util.MkdirAllX(path.Dir(db.info.Path), db.info.Permission)
+	err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission)
 	if err != nil {
 		return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
 	}

@@ -2,7 +2,7 @@ package shard
 
 import (
 	"os"
-	"path"
+	"path/filepath"
 	"testing"
 
 	"github.com/nspcc-dev/neofs-node/pkg/core/object"

@@ -19,7 +19,7 @@ func TestRefillMetabase(t *testing.T) {
 	defer os.RemoveAll(p)
 
 	blobOpts := []blobstor.Option{
-		blobstor.WithRootPath(path.Join(p, "blob")),
+		blobstor.WithRootPath(filepath.Join(p, "blob")),
 		blobstor.WithBlobovniczaShallowWidth(1),
 		blobstor.WithBlobovniczaShallowDepth(1),
 	}

@@ -27,7 +27,7 @@ func TestRefillMetabase(t *testing.T) {
 	sh := New(
 		WithBlobStorOptions(blobOpts...),
 		WithMetaBaseOptions(
-			meta.WithPath(path.Join(p, "meta")),
+			meta.WithPath(filepath.Join(p, "meta")),
 		),
 	)
 

@@ -141,7 +141,7 @@ func TestRefillMetabase(t *testing.T) {
 	sh = New(
 		WithBlobStorOptions(blobOpts...),
 		WithMetaBaseOptions(
-			meta.WithPath(path.Join(p, "meta_restored")),
+			meta.WithPath(filepath.Join(p, "meta_restored")),
 		),
 	)
 

@@ -4,7 +4,7 @@ import (
 	"crypto/sha256"
 	"math/rand"
 	"os"
-	"path"
+	"path/filepath"
 	"strings"
 	"testing"
 

@@ -34,27 +34,27 @@ func newShard(t testing.TB, enableWriteCache bool) *shard.Shard {
 
 func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts []writecache.Option, bsOpts []blobstor.Option) *shard.Shard {
 	if enableWriteCache {
-		rootPath = path.Join(rootPath, "wc")
+		rootPath = filepath.Join(rootPath, "wc")
 	} else {
-		rootPath = path.Join(rootPath, "nowc")
+		rootPath = filepath.Join(rootPath, "nowc")
 	}
 
 	opts := []shard.Option{
 		shard.WithLogger(zap.L()),
 		shard.WithBlobStorOptions(
 			append([]blobstor.Option{
-				blobstor.WithRootPath(path.Join(rootPath, "blob")),
+				blobstor.WithRootPath(filepath.Join(rootPath, "blob")),
 				blobstor.WithBlobovniczaShallowWidth(2),
 				blobstor.WithBlobovniczaShallowDepth(2),
 			}, bsOpts...)...,
 		),
 		shard.WithMetaBaseOptions(
-			meta.WithPath(path.Join(rootPath, "meta")),
+			meta.WithPath(filepath.Join(rootPath, "meta")),
 		),
 		shard.WithWriteCache(enableWriteCache),
 		shard.WithWriteCacheOptions(
 			append(
-				[]writecache.Option{writecache.WithPath(path.Join(rootPath, "wcache"))},
+				[]writecache.Option{writecache.WithPath(filepath.Join(rootPath, "wcache"))},
 				wcOpts...)...,
 		),
 	}

@@ -2,14 +2,14 @@ package writecache
 
 import (
 	"os"
-	"path"
+	"path/filepath"
 
 	"go.etcd.io/bbolt"
 )
 
 // OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
 func OpenDB(p string, ro bool) (*bbolt.DB, error) {
-	return bbolt.Open(path.Join(p, dbName), os.ModePerm, &bbolt.Options{
+	return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
 		NoFreelistSync: true,
 		NoSync:         true,
 		ReadOnly:       ro,

@@ -4,7 +4,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"path"
+	"path/filepath"
 
 	"github.com/nspcc-dev/neofs-node/pkg/util"
 	locodedb "github.com/nspcc-dev/neofs-node/pkg/util/locode/db"

@@ -20,7 +20,7 @@ func (db *DB) Open() error {
 	// copy-paste from metabase:
 	// consider universal Open/Close for BoltDB wrappers
 
-	err := util.MkdirAllX(path.Dir(db.path), db.mode)
+	err := util.MkdirAllX(filepath.Dir(db.path), db.mode)
 	if err != nil {
 		return fmt.Errorf("could not create dir for BoltDB: %w", err)
 	}

@@ -1,7 +1,7 @@
 package state_test
 
 import (
-	"path"
+	"path/filepath"
 	"testing"
 
 	"github.com/nspcc-dev/neofs-node/pkg/util/state"

@@ -9,7 +9,7 @@ import (
 )
 
 func TestPersistentStorage_UInt32(t *testing.T) {
-	storage, err := state.NewPersistentStorage(path.Join(t.TempDir(), ".storage"))
+	storage, err := state.NewPersistentStorage(filepath.Join(t.TempDir(), ".storage"))
 	require.NoError(t, err)
 	defer storage.Close()
 