Compare commits

master..bugfix/support-ec

No commits in common. "master" and "bugfix/support-ec" have entirely different histories.

35 changed files with 215 additions and 1067 deletions

View file

@ -13,7 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.21'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v2

View file

@ -11,21 +11,20 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
go-version: '1.22'
cache: true
- name: Install linters
run: make lint-install
- name: Run linters
run: make lint
- name: golangci-lint
uses: https://github.com/golangci/golangci-lint-action@v3
with:
version: latest
tests:
name: Tests
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.22', '1.23' ]
go_versions: [ '1.21', '1.22' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@ -48,7 +47,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.21'
cache: true
- name: Run tests

View file

@ -3,8 +3,8 @@
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/issues) and
[pull requests](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/pulls) for existing
- Check the open [issues](https://github.com/TrueCloudLab/xk6-frostfs/issues) and
[pull requests](https://github.com/TrueCloudLab/xk6-frostfs/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
@ -27,20 +27,19 @@ Start by forking the `xk6-frostfs` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:
### Set up your repository
Fork [xk6-frostfs upstream](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/fork) source
### Set up your GitHub Repository
Fork [xk6-frostfs upstream](https://github.com/TrueCloudLab/xk6-frostfs/fork) source
repository to your own personal repository. Copy the URL of your fork (you will
need it for the `git clone` command below).
```sh
$ git clone https://git.frostfs.info/TrueCloudLab/xk6-frostfs
$ git clone https://github.com/TrueCloudLab/xk6-frostfs
```
### Set up git remote as ``upstream``
```sh
$ cd xk6-frostfs
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/xk6-frostfs
$ git remote add upstream https://github.com/TrueCloudLab/xk6-frostfs
$ git fetch upstream
$ git merge upstream/master
...
@ -90,7 +89,7 @@ $ git push origin feature/123-something_awesome
```
### Create a Pull Request
Pull requests can be created via git.frostfs.info. Refer to [this
Pull requests can be created via GitHub. Refer to [this
document](https://help.github.com/articles/creating-a-pull-request/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.

View file

@ -3,15 +3,10 @@
# Common variables
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
GO_VERSION ?= 1.19
LINT_VERSION ?= 1.49.0
BINDIR = bin
OUTPUT_LINT_DIR ?= $(abspath $(BINDIR))/linters
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache
# Binaries to build
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
BINS = $(addprefix $(BINDIR)/, $(CMDS))
@ -69,22 +64,7 @@ format:
# Run linters
lint:
@if [ ! -d "$(LINT_DIR)" ]; then \
make lint-install; \
fi
$(LINT_DIR)/golangci-lint run --timeout=5m
# Install linters
lint-install:
@rm -rf $(OUTPUT_LINT_DIR)
@mkdir -p $(OUTPUT_LINT_DIR)
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
@golangci-lint --timeout=5m run
# Run linters in Docker
docker/lint:

View file

@ -1,5 +1,5 @@
<p align="center">
<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
</p>
<p align="center">
<a href="https://go.k6.io/k6">k6</a> extension to test and benchmark FrostFS related protocols.
@ -48,16 +48,15 @@ Create native client with `connect` method. Arguments:
- dial timeout in seconds (0 for the default value)
- stream timeout in seconds (0 for the default value)
- generate object header on the client side (for big object - split locally too)
- max size for generated object header on the client side (for big object - the size that the object is split into)
```js
import native from 'k6/x/frostfs/native';
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false)
```
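
A minimal sketch, assuming the devenv endpoint from the example above, that ties the `connect` call to the `putContainer` method documented in the Methods list below; the parameter values are copied from this README's own examples, and the error handling is illustrative rather than part of the documented API.

```js
import native from 'k6/x/frostfs/native';

// 5-argument connect form as documented in this branch:
// endpoint, hex private key, dial timeout, stream timeout, prepareLocally.
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false)

export default function () {
  // Parameter names and values mirror the putContainer example below.
  const res = frostfs_cli.putContainer({
    acl: 'public-read-write',
    placement_policy: 'REP 3',
    name: 'container-name',
    name_global_scope: 'false'
  })
  if (!res.success) {
    throw new Error(res.error)
  }
  console.log(`created container ${res.container_id}`)
}
```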
### Methods
- `putContainer(params)`. The `params` is a dictionary (e.g.
`{placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
`{acl:'public-read-write',placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
Returns dictionary with `success`
boolean flag, `container_id` string, and `error` string.
- `setBufferSize(size)`. Sets internal buffer size for data upload and
@ -186,25 +185,6 @@ Flags:
-v, --version version for registry-exporter
```
## Import pregen into registry db
You can import pregenerated JSON files into the registry bolt DB. Use `frostfs-xk6-registry import`. Usage examples are shown in the help output:
```shell
$ ./bin/frostfs-xk6-registry import -h
Import objects into registry from pregenerated files
Usage:
xk6-registry import [flags]
Examples:
xk6-registry import registry.bolt preset.json
xk6-registry import registry.bolt preset.json another_preset.json
Flags:
-h, --help help for import
```
# License
- [GNU General Public License v3.0](LICENSE)

View file

@ -1,55 +0,0 @@
package importer
import (
"encoding/json"
"os"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
)
type PreGenObj struct {
Bucket string `json:"bucket"`
Object string `json:"object"`
Container string `json:"container"`
}
type PreGenerateInfo struct {
Buckets []string `json:"buckets"`
Containers []string `json:"containers"`
Objects []PreGenObj `json:"objects"`
ObjSize string `json:"obj_size"`
}
// ImportJSONPreGen writes objects from pregenerated JSON file
// to the registry.
// Note that ImportJSONPreGen does not check whether an object already
// exists in the registry, so importing the same file twice will leave
// two entries representing the same object.
func ImportJSONPreGen(o *registry.ObjRegistry, filename string) error {
f, err := os.ReadFile(filename)
if err != nil {
return err
}
var pregenInfo PreGenerateInfo
err = json.Unmarshal(f, &pregenInfo)
if err != nil {
return err
}
// AddObject uses DB.Batch to combine concurrent Batch calls
// into a single Bolt transaction. DB.Batch is limited by
// DB.MaxBatchDelay, which may affect performance.
for _, obj := range pregenInfo.Objects {
if obj.Bucket != "" {
err = o.AddObject("", "", obj.Bucket, obj.Object, "")
} else {
err = o.AddObject(obj.Container, obj.Object, "", "", "")
}
if err != nil {
return err
}
}
return nil
}

View file

@ -1,27 +0,0 @@
package importer
import (
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
"github.com/spf13/cobra"
)
// Cmd represents the import command.
var Cmd = &cobra.Command{
Use: "import",
Short: "Import objects into registry",
Long: "Import objects into registry from pregenerated files",
Example: `xk6-registry import registry.bolt preset.json
xk6-registry import registry.bolt preset.json another_preset.json`,
RunE: runCmd,
Args: cobra.MinimumNArgs(2),
}
func runCmd(cmd *cobra.Command, args []string) error {
objRegistry := registry.NewObjRegistry(cmd.Context(), args[0])
for i := 1; i < len(args); i++ {
if err := ImportJSONPreGen(objRegistry, args[i]); err != nil {
return err
}
}
return nil
}

View file

@ -1,18 +0,0 @@
package main
import (
"context"
"os"
"os/signal"
"syscall"
)
func main() {
ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
if cmd, err := rootCmd.ExecuteContextC(ctx); err != nil {
cmd.PrintErrln("Error:", err.Error())
cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
os.Exit(1)
}
}

View file

@ -1,33 +0,0 @@
package main
import (
"runtime"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/cmd/xk6-registry/importer"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/version"
"github.com/spf13/cobra"
)
var rootCmd = &cobra.Command{
Use: "xk6-registry",
Version: version.Version,
Short: "Command Line Tool to work with Registry",
Long: `Registry provides tools to work with object registry for xk6.
It contains command for importing objects in registry from preset`,
SilenceErrors: true,
SilenceUsage: true,
Run: rootCmdRun,
}
func init() {
cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
rootCmd.SetVersionTemplate(`FrostFS xk6-registry
{{printf "Version: %s" .Version }}
GoVersion: {{ runtimeVersion }}
`)
rootCmd.AddCommand(importer.Cmd)
}
func rootCmdRun(cmd *cobra.Command, _ []string) {
_ = cmd.Usage()
}

View file

@ -3,7 +3,7 @@ import { fail } from "k6";
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b');
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false, 0)
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false)
export const options = {
stages: [
@ -13,6 +13,7 @@ export const options = {
export function setup() {
const params = {
acl: 'public-read-write',
placement_policy: 'REP 3',
name: 'container-name',
name_global_scope: 'false'

View file

@ -3,7 +3,7 @@ import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b');
const container = "AjSxSNNXbJUDPqqKYm1VbFVDGCakbpUNH8aGjPmGAH3B"
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false)
const frostfs_obj = frostfs_cli.onsite(container, payload)
export const options = {

go.mod
View file

@ -1,6 +1,6 @@
module git.frostfs.info/TrueCloudLab/xk6-frostfs
go 1.22
go 1.21
require (
git.frostfs.info/TrueCloudLab/frostfs-node v0.38.3-0.20240502170333-ec2873caa7c6

View file

@ -1,58 +0,0 @@
package native
import (
"context"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
const networkCacheTTL = time.Minute
var networkInfoCache = &networkInfoCacheT{}
type networkInfoCacheT struct {
guard sync.RWMutex
current *netmap.NetworkInfo
fetchTS time.Time
}
func (c *networkInfoCacheT) getOrFetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
if v := c.get(); v != nil {
return v, nil
}
return c.fetch(ctx, cli)
}
func (c *networkInfoCacheT) get() *netmap.NetworkInfo {
c.guard.RLock()
defer c.guard.RUnlock()
if c.current == nil || time.Since(c.fetchTS) > networkCacheTTL {
return nil
}
return c.current
}
func (c *networkInfoCacheT) fetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
c.guard.Lock()
defer c.guard.Unlock()
if time.Since(c.fetchTS) <= networkCacheTTL {
return c.current, nil
}
res, err := cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
v := res.Info()
c.current = &v
c.fetchTS = time.Now()
return c.current, nil
}

View file

@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -34,7 +35,6 @@ type (
tok session.Object
cli *client.Client
prepareLocally bool
maxObjSize uint64
}
PutResponse struct {
@ -71,7 +71,6 @@ type (
hdr object.Object
payload []byte
prepareLocally bool
maxObjSize uint64
}
)
@ -104,7 +103,7 @@ func (c *Client) Put(containerID string, headers map[string]string, payload data
o.SetOwnerID(owner)
o.SetAttributes(attrs...)
resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize, c.maxObjSize)
resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize)
if err != nil {
return PutResponse{Success: false, Error: err.Error()}
}
@ -241,7 +240,7 @@ func (c *Client) VerifyHash(containerID, objectID, expectedHash string) VerifyHa
}
actualHash := hex.EncodeToString(hasher.Sum(nil))
if actualHash != expectedHash {
return VerifyHashResponse{Success: false, Error: "hash mismatch"}
return VerifyHashResponse{Success: true, Error: "hash mismatch"}
}
return VerifyHashResponse{Success: true}
@ -264,6 +263,16 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
container.SetCreationTime(&cnr, time.Now())
cnr.SetOwner(usr)
if basicACLStr, ok := params["acl"]; ok {
var basicACL acl.Basic
err := basicACL.DecodeString(basicACLStr)
if err != nil {
return c.putCnrErrorResponse(err)
}
cnr.SetBasicACL(basicACL)
}
placementPolicyStr, ok := params["placement_policy"]
if ok {
var placementPolicy netmap.PlacementPolicy
@ -364,7 +373,6 @@ func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObj
hdr: *obj,
payload: data,
prepareLocally: c.prepareLocally,
maxObjSize: c.maxObjSize,
}
}
@ -390,7 +398,7 @@ func (p PreparedObject) Put(headers map[string]string) PutResponse {
return PutResponse{Success: false, Error: err.Error()}
}
_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0, p.maxObjSize)
_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0)
if err != nil {
return PutResponse{Success: false, Error: err.Error()}
}
@ -405,7 +413,7 @@ func (s epochSource) CurrentEpoch() uint64 {
}
func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Object,
hdr *object.Object, payload datagen.Payload, chunkSize int, maxObjSize uint64,
hdr *object.Object, payload datagen.Payload, chunkSize int,
) (*client.ResObjectPut, error) {
bufSize := defaultBufferSize
if chunkSize > 0 {
@ -426,16 +434,13 @@ func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Ob
prm.MaxChunkLength = chunkSize
}
if prepareLocally {
ni, err := networkInfoCache.getOrFetch(vu.Context(), cli)
res, err := cli.NetworkInfo(vu.Context(), client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
prm.MaxSize = ni.MaxObjectSize()
prm.EpochSource = epochSource(ni.CurrentEpoch())
prm.MaxSize = res.Info().MaxObjectSize()
prm.EpochSource = epochSource(res.Info().CurrentEpoch())
prm.WithoutHomomorphHash = true
if maxObjSize > 0 {
prm.MaxSize = maxObjSize
}
}
objectWriter, err := cli.ObjectPutInit(vu.Context(), prm)

View file

@ -52,17 +52,13 @@ func (n *Native) Exports() modules.Exports {
return modules.Exports{Default: n}
}
func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool, maxObjSize int) (*Client, error) {
func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool) (*Client, error) {
var (
cli client.Client
pk *keys.PrivateKey
err error
)
if maxObjSize < 0 {
return nil, fmt.Errorf("max object size value must be positive")
}
pk, err = keys.NewPrivateKey()
if len(hexPrivateKey) != 0 {
pk, err = keys.NewPrivateKeyFromHex(hexPrivateKey)
@ -118,16 +114,6 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
tok.SetAuthKey(&key)
tok.SetExp(exp)
if prepareLocally && maxObjSize > 0 {
res, err := cli.NetworkInfo(n.vu.Context(), client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
if uint64(maxObjSize) > res.Info().MaxObjectSize() {
return nil, fmt.Errorf("max object size must be not greater than %d bytes", res.Info().MaxObjectSize())
}
}
// register metrics
objPutSuccess, _ = stats.Registry.NewMetric("frostfs_obj_put_success", metrics.Counter)
@ -154,6 +140,5 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
tok: tok,
cli: &cli,
prepareLocally: prepareLocally,
maxObjSize: uint64(maxObjSize),
}, nil
}

View file

@ -11,7 +11,6 @@ type ObjExporter struct {
type PreGenerateInfo struct {
Buckets []string `json:"buckets"`
Containers []string `json:"containers"`
Objects []ObjInfo `json:"objects"`
ObjSize string `json:"obj_size"`
}
@ -19,8 +18,6 @@ type PreGenerateInfo struct {
type ObjInfo struct {
Bucket string `json:"bucket"`
Object string `json:"object"`
CID string `json:"cid"`
OID string `json:"oid"`
}
func NewObjExporter(selector *ObjSelector) *ObjExporter {
@ -40,7 +37,6 @@ func (o *ObjExporter) ExportJSONPreGen(fileName string) error {
}
bucketMap := make(map[string]struct{})
containerMap := make(map[string]struct{})
count, err := o.selector.Count()
if err != nil {
@ -54,7 +50,7 @@ func (o *ObjExporter) ExportJSONPreGen(fileName string) error {
break
}
if err = writeObjectInfo(comma, info, f); err != nil {
if _, err = f.WriteString(fmt.Sprintf(`%s{"bucket":"%s","object":"%s"}`, comma, info.S3Bucket, info.S3Key)); err != nil {
return err
}
@ -62,54 +58,15 @@ func (o *ObjExporter) ExportJSONPreGen(fileName string) error {
comma = ","
}
if info.S3Bucket != "" {
bucketMap[info.S3Bucket] = struct{}{}
}
if info.CID != "" {
containerMap[info.CID] = struct{}{}
}
}
if _, err = f.WriteString(`]`); err != nil {
return err
}
if len(bucketMap) > 0 {
if err = writeContainerInfo("buckets", bucketMap, f); err != nil {
return err
}
}
if len(containerMap) > 0 {
if err = writeContainerInfo("containers", containerMap, f); err != nil {
return err
}
}
if _, err = f.WriteString(`}`); err != nil {
return err
}
return nil
}
func writeObjectInfo(comma string, info *ObjectInfo, f *os.File) (err error) {
var res string
if info.S3Bucket != "" || info.S3Key != "" {
res = fmt.Sprintf(`%s{"bucket":"%s","object":"%s"}`, comma, info.S3Bucket, info.S3Key)
} else {
res = fmt.Sprintf(`%s{"cid":"%s","oid":"%s"}`, comma, info.CID, info.OID)
}
_, err = f.WriteString(res)
return err
}
func writeContainerInfo(attrName string, bucketMap map[string]struct{}, f *os.File) (err error) {
if _, err = f.WriteString(fmt.Sprintf(`,"%s":[`, attrName)); err != nil {
if _, err = f.WriteString(`],"buckets":[`); err != nil {
return err
}
i := 0
comma := ""
comma = ""
for bucket := range bucketMap {
if _, err = f.WriteString(fmt.Sprintf(`%s"%s"`, comma, bucket)); err != nil {
return err
@ -119,6 +76,7 @@ func writeContainerInfo(attrName string, bucketMap map[string]struct{}, f *os.Fi
}
i++
}
_, err = f.WriteString(`]`)
_, err = f.WriteString(`]}`)
return err
}

View file

@ -1,156 +0,0 @@
package registry
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"os"
"path/filepath"
"slices"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
type expectedResult struct {
mode string
objects []ObjectInfo
dir string
dbName string
jsonName string
}
func TestObjectExporter(t *testing.T) {
names := []string{"s3", "grpc"}
for _, name := range names {
t.Run(name, runExportTest)
t.Run(name+"-changed", runExportChangedTest)
t.Run(name+"-empty", runExportEmptyTest)
}
}
func runExportTest(t *testing.T) {
expected := getExpectedResult(t)
objReg := getFilledRegistry(t, expected)
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(expected.objects, expected.jsonName))
}
func runExportChangedTest(t *testing.T) {
expected := getExpectedResult(t)
objReg := getFilledRegistry(t, expected)
newStatus := randString(10)
num := randPositiveInt(1, len(expected.objects))
changedObjects := make([]ObjectInfo, num)
require.Equal(t, num, copy(changedObjects[:], expected.objects[:]))
sel := NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated})
for i := range changedObjects {
changedObjects[i].Status = newStatus
require.NoError(t, objReg.SetObjectStatus(sel.NextObject().Id, statusCreated, newStatus))
}
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: newStatus}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(changedObjects, expected.jsonName))
}
func runExportEmptyTest(t *testing.T) {
expected := getExpectedResult(t)
expected.objects = make([]ObjectInfo, 0)
objReg := getFilledRegistry(t, expected)
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(expected.objects, expected.jsonName))
}
func getExpectedResult(t *testing.T) expectedResult {
num := randPositiveInt(2, 100)
mode := getMode(t.Name())
require.NotEqual(t, "", mode, "test mode should contain either \"s3\" or\"grpc\"")
dir := t.TempDir()
res := expectedResult{
mode: mode,
objects: generateObjectInfo(num, t.Name()),
dir: dir,
dbName: filepath.Join(dir, "registry-"+mode+".db"),
jsonName: filepath.Join(dir, "registry-"+mode+".json"),
}
return res
}
func randPositiveInt(min, max int) int {
return rand.Intn(max-min) + min
}
func getMode(name string) (res string) {
if strings.Contains(name, "s3") {
res = filepath.Base(name)
}
if strings.Contains(name, "grpc") {
res = filepath.Base(name)
}
return res
}
func generateObjectInfo(num int, mode string) []ObjectInfo {
res := make([]ObjectInfo, num)
for i := range res {
res[i] = randomObjectInfo()
if !strings.Contains(mode, "s3") {
res[i].S3Bucket = ""
res[i].S3Key = ""
}
if !strings.Contains(mode, "grpc") {
res[i].CID = ""
res[i].OID = ""
}
}
return res
}
func getFilledRegistry(t *testing.T, expected expectedResult) *ObjRegistry {
objReg := NewObjRegistry(context.Background(), expected.dbName)
for i := range expected.objects {
require.NoError(t, objReg.AddObject(expected.objects[i].CID, expected.objects[i].OID, expected.objects[i].S3Bucket, expected.objects[i].S3Key, expected.objects[i].PayloadHash))
}
return objReg
}
func checkExported(expected []ObjectInfo, fileName string) error {
file, err := os.ReadFile(fileName)
if err != nil {
return err
}
if !json.Valid(file) {
return fmt.Errorf("exported json file %s is invalid", fileName)
}
var actual PreGenerateInfo
if json.Unmarshal(file, &actual) != nil {
return err
}
if len(expected) != len(actual.Objects) {
return fmt.Errorf("expected len(): %v, got len(): %v", len(expected), len(actual.Objects))
}
for i := range expected {
if !slices.ContainsFunc(actual.Objects, func(oi ObjInfo) bool {
compareS3 := oi.Bucket == expected[i].S3Bucket && oi.Object == expected[i].S3Key
comparegRPC := oi.CID == expected[i].CID && oi.OID == expected[i].OID
return compareS3 && comparegRPC
}) {
return fmt.Errorf("object %v not found in exported json file %s", expected[i], fileName)
}
}
return nil
}

View file

@ -101,7 +101,7 @@ func randomObjectInfo() ObjectInfo {
func randString(n int) string {
var sb strings.Builder
for i := 0; i < n; i++ {
sb.WriteRune('a' + rune(rand.Int31())%('z'-'a'+1))
sb.WriteRune('a' + rune(rand.Int())%('z'-'a'+1))
}
return sb.String()
}

View file

@ -75,7 +75,7 @@ func (o *ObjRegistry) AddObject(cid, oid, s3Bucket, s3Key, payloadHash string) e
}
func (o *ObjRegistry) SetObjectStatus(id uint64, oldStatus, newStatus string) error {
return o.boltDB.Batch(func(tx *bbolt.Tx) error {
return o.boltDB.Update(func(tx *bbolt.Tx) error {
oldB := tx.Bucket([]byte(oldStatus))
if oldB == nil {
return fmt.Errorf("bucket doesn't exist: '%s'", oldStatus)
@ -110,7 +110,7 @@ func (o *ObjRegistry) SetObjectStatus(id uint64, oldStatus, newStatus string) er
}
func (o *ObjRegistry) DeleteObject(id uint64) error {
return o.boltDB.Batch(func(tx *bbolt.Tx) error {
return o.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.ForEach(func(_ []byte, b *bbolt.Bucket) error {
return b.Delete(encodeId(id))
})

View file

@ -3,15 +3,12 @@ package registry
import (
"context"
"fmt"
"sync"
"time"
"github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt"
)
const nextObjectTimeout = 10 * time.Second
type ObjFilter struct {
Status string
Age int
@ -24,8 +21,6 @@ type ObjSelector struct {
filter *ObjFilter
cacheSize int
kind SelectorKind
// Sync synchronizes VU used for deletion.
Sync sync.WaitGroup
}
// objectSelectCache is the default maximum size of a batch to select from DB.
@ -62,18 +57,9 @@ func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind,
// - underlying registry context is done, nil objects will be returned on the
// currently blocked and every further NextObject calls.
func (o *ObjSelector) NextObject() *ObjectInfo {
if o.kind == SelectorOneshot {
return <-o.objChan
}
select {
case <-time.After(nextObjectTimeout):
return nil
case obj := <-o.objChan:
return obj
}
}
// Count returns total number of objects that match filter of the selector.
func (o *ObjSelector) Count() (int, error) {
count := 0

View file

@ -142,70 +142,6 @@ func (c *Client) Get(bucket, key string) GetResponse {
return GetResponse{Success: true}
}
// DeleteObjectVersion deletes object version with specified versionID.
// If version argument is empty, deletes all versions and delete-markers of specified object.
func (c *Client) DeleteObjectVersion(bucket, key, version string) DeleteResponse {
var toDelete []types.ObjectIdentifier
if version != "" {
toDelete = append(toDelete, types.ObjectIdentifier{
Key: aws.String(key),
VersionId: aws.String(version),
})
} else {
versions, err := c.cli.ListObjectVersions(c.vu.Context(), &s3.ListObjectVersionsInput{
Bucket: aws.String(bucket),
Prefix: aws.String(key),
})
if err != nil {
stats.Report(c.vu, objDeleteFails, 1)
return DeleteResponse{Success: false, Error: err.Error()}
}
toDelete = filterObjectVersions(versions, key)
}
if len(toDelete) == 0 {
return c.Delete(bucket, key)
} else {
_, err := c.cli.DeleteObjects(c.vu.Context(), &s3.DeleteObjectsInput{
Bucket: aws.String(bucket),
Delete: &types.Delete{
Objects: toDelete,
Quiet: true,
},
})
if err != nil {
stats.Report(c.vu, objDeleteFails, 1)
return DeleteResponse{Success: false, Error: err.Error()}
}
}
return DeleteResponse{Success: true}
}
func filterObjectVersions(versions *s3.ListObjectVersionsOutput, key string) []types.ObjectIdentifier {
var result []types.ObjectIdentifier
for _, v := range versions.Versions {
if *v.Key == key {
result = append(result, types.ObjectIdentifier{
Key: v.Key,
VersionId: v.VersionId,
})
}
}
for _, marker := range versions.DeleteMarkers {
if *marker.Key == key {
result = append(result, types.ObjectIdentifier{
Key: marker.Key,
VersionId: marker.VersionId,
})
}
}
return result
}
func get(
c *s3.Client,
bucket string,
@ -244,7 +180,7 @@ func (c *Client) VerifyHash(bucket, key, expectedHash string) VerifyHashResponse
}
actualHash := hex.EncodeToString(hasher.Sum(nil))
if actualHash != expectedHash {
return VerifyHashResponse{Success: false, Error: "hash mismatch"}
return VerifyHashResponse{Success: true, Error: "hash mismatch"}
}
return VerifyHashResponse{Success: true}
@ -279,26 +215,6 @@ func (c *Client) CreateBucket(bucket string, params map[string]string) CreateBuc
return CreateBucketResponse{Success: false, Error: err.Error()}
}
var versioning bool
if strVersioned, ok := params["versioning"]; ok {
if versioning, err = strconv.ParseBool(strVersioned); err != nil {
stats.Report(c.vu, createBucketFails, 1)
return CreateBucketResponse{Success: false, Error: err.Error()}
}
}
if versioning {
_, err = c.cli.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
Bucket: aws.String(bucket),
VersioningConfiguration: &types.VersioningConfiguration{
Status: types.BucketVersioningStatusEnabled,
},
})
if err != nil {
stats.Report(c.vu, createBucketFails, 1)
return CreateBucketResponse{Success: false, Error: err.Error()}
}
}
stats.Report(c.vu, createBucketSuccess, 1)
stats.Report(c.vu, createBucketDuration, metrics.D(time.Since(start)))
return CreateBucketResponse{Success: true}

View file

@ -31,8 +31,8 @@ const grpc_endpoint =
const grpc_client = native.connect(
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true'
: false);
const log = logging.new().withField('endpoint', grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;

View file

@ -30,8 +30,8 @@ const grpc_endpoint =
const grpc_client = native.connect(
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' :
false);
const log = logging.new().withField('endpoint', grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;

View file

@ -1,34 +0,0 @@
import { uuidv4 } from './k6-utils-1.4.0.js';
export function generateS3Key() {
let width = parseInt(__ENV.DIR_WIDTH || '0');
let height = parseInt(__ENV.DIR_HEIGHT || '0');
let key = ''
if (width > 0 && height > 0) {
for (let index = 0; index < height; index++) {
const w = Math.floor(Math.random() * width) + 1;
key = key + 'dir' + w + '/';
}
}
key += objName();
return key;
}
const asciiLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
function objName() {
if (__ENV.OBJ_NAME) {
return __ENV.OBJ_NAME;
}
const length = parseInt(__ENV.OBJ_NAME_LENGTH || '0');
if (length > 0) {
let name = "";
for (let i = 0; i < length; i++) {
name += asciiLetters.charAt(Math.floor(Math.random() * asciiLetters.length));
}
return name;
}
return uuidv4();
}

View file

@ -4,18 +4,17 @@ from helpers.cmd import execute_cmd, log
def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
configuration = ""
if location:
configuration = f"--create-bucket-configuration 'LocationConstraint={location}'"
location = f"--create-bucket-configuration 'LocationConstraint={location}'"
if acl:
acl = f"--acl {acl}"
bucket_name = str(uuid.uuid4())
no_verify_ssl_str = "--no-verify-ssl" if no_verify_ssl else ""
cmd_line = f"aws {no_verify_ssl_str} s3api create-bucket --bucket {bucket_name} " \
f"--endpoint {endpoint} {configuration} {acl} "
f"--endpoint {endpoint} {location} {acl} "
cmd_line_ver = f"aws {no_verify_ssl_str} s3api put-bucket-versioning --bucket {bucket_name} " \
f"--versioning-configuration Status=Enabled --endpoint {endpoint}"
f"--versioning-configuration Status=Enabled --endpoint {endpoint} {acl} "
output, success = execute_cmd(cmd_line)
@ -25,7 +24,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
f"Error: {output}", endpoint)
return False
if versioning:
if versioning == "True":
output, success = execute_cmd(cmd_line_ver)
if not success:
log(f"{cmd_line_ver}\n"
@ -34,7 +33,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
else:
log(f"Bucket versioning has been applied for bucket {bucket_name}", endpoint)
log(f"Created bucket: {bucket_name} ({location})", endpoint)
log(f"Created bucket: {bucket_name}", endpoint)
return bucket_name

View file

@ -1,16 +1,18 @@
import re
from helpers.cmd import execute_cmd, log
def create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local=False, retry=0):
if retry > int(container_creation_retry):
def create_container(endpoint, policy, wallet_path, config, acl, local=False, depth=0):
if depth > 20:
raise ValueError(f"unable to create container: too many unsuccessful attempts")
if wallet_path:
wallet_file = f"--wallet {wallet_path}"
if config:
wallet_config = f"--config {config}"
if acl:
acl_param = f"--basic-acl {acl}"
cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} container create {wallet_file} {wallet_config} " \
f" --policy '{policy}' --await"
f" --policy '{policy}' {acl_param} --await"
output, success = execute_cmd(cmd_line)
@ -32,21 +34,7 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, co
raise ValueError(f"no CID was parsed from command output:\t{fst_str}")
cid = splitted[1]
log(f"Created container: {cid} ({policy})", endpoint)
# Add rule for container
if rules:
r = ""
for rule in rules:
r += f" --rule '{rule}' "
cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} ape-manager add {wallet_file} {wallet_config} " \
f" --chain-id 'chain-id' {r} --target-name '{cid}' --target-type 'container'"
output, success = execute_cmd(cmd_line)
if not success:
log(f"{cmd_line}\n"
f"Rule has not been added\n"
f"{output}", endpoint)
return False
log(f"Created container {cid}", endpoint)
if not local:
return cid
@ -100,7 +88,7 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, co
return cid
log(f"Created container {cid} is not stored on {endpoint}, creating another one...", endpoint)
return create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local, retry + 1)
return create_container(endpoint, policy, wallet_path, config, acl, local, depth + 1)
def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config):

View file

@ -15,21 +15,18 @@ from helpers.frostfs_cli import create_container, upload_object
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
MAX_WORKERS = 50
DEFAULT_POLICY = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
DEFAULT_RULES = ["allow Object.* *"]
parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb')
parser.add_argument('--containers', help='Number of containers to create')
parser.add_argument('--retry', default=20, help='Maximum number of retries to create a container')
parser.add_argument('--out', help='JSON file with output')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects')
parser.add_argument('--wallet', help='Wallet file path')
parser.add_argument('--config', help='Wallet config file path')
parser.add_argument(
"--policy",
help=f"Container placement policy. Default is {DEFAULT_POLICY}",
action="append"
help="Container placement policy",
default="REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
)
parser.add_argument('--endpoint', help='Nodes addresses separated by comma.')
parser.add_argument('--update', help='Save existing containers')
@ -38,10 +35,7 @@ parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Def
parser.add_argument('--sleep', help='Time to sleep between containers creation and objects upload (in seconds), '
'Default = 8', default=8)
parser.add_argument('--local', help='Create containers that store data on provided endpoints. Warning: additional empty containers may be created.', action='store_true')
parser.add_argument(
'--rule',
help='Rule attached to created containers. All entries of CONTAINER_ID will be replaced with id of created container.',
action="append")
parser.add_argument('--acl', help='Container ACL. Default is public-read-write.', default='public-read-write')
args: Namespace = parser.parse_args()
print(args)
@ -52,17 +46,11 @@ def main():
objects_list = []
endpoints = args.endpoint.split(',')
if not args.policy:
args.policy = [DEFAULT_POLICY]
container_creation_retry = args.retry
wallet = args.wallet
wallet_config = args.config
workers = int(args.workers)
objects_per_container = int(args.preload_obj)
rules = args.rule
if not rules:
rules = DEFAULT_RULES
ignore_errors = args.ignore_errors
if args.update:
@ -75,9 +63,9 @@ def main():
containers_count = int(args.containers)
print(f"Create containers: {containers_count}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, rules, args.local)
for _, endpoint, policy in
zip(range(containers_count), cycle(endpoints), cycle(args.policy))]
containers_runs = [executor.submit(create_container, endpoint, args.policy, wallet, wallet_config, args.acl, args.local)
for _, endpoint in
zip(range(containers_count), cycle(endpoints))]
for run in containers_runs:
container_id = run.result()

View file

@ -11,12 +11,6 @@ from concurrent.futures import ProcessPoolExecutor
from helpers.cmd import random_payload
from helpers.aws_cli import create_bucket, upload_object
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
ERROR_WRONG_PERCENTAGE = 3
MAX_WORKERS = 50
DEFAULT_LOCATION = ""
parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb.')
@ -26,9 +20,8 @@ parser.add_argument('--preload_obj', help='Number of pre-loaded objects.')
parser.add_argument('--endpoint', help='S3 Gateways addresses separated by comma.')
parser.add_argument('--update', help='True/False, False by default. Save existing buckets from target file (--out). '
'New buckets will not be created.')
parser.add_argument('--location', help=f'AWS location constraint. Default is "{DEFAULT_LOCATION}"', action="append")
parser.add_argument('--versioning', help='True/False, False by default. Alias of --buckets_versioned=100')
parser.add_argument('--buckets_versioned', help='Percent of versioned buckets. Default is 0', default=0)
parser.add_argument('--location', help='AWS location. Will be empty if not declared.', default="")
parser.add_argument('--versioning', help='True/False, False by default.')
parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
@ -39,6 +32,10 @@ parser.add_argument('--acl', help='Bucket ACL. Default is private. Expected valu
args = parser.parse_args()
print(args)
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
MAX_WORKERS = 50
def main():
buckets = []
objects_list = []
@ -46,8 +43,6 @@ def main():
no_verify_ssl = args.no_verify_ssl
endpoints = args.endpoint.split(',')
if not args.location:
args.location = [DEFAULT_LOCATION]
workers = int(args.workers)
objects_per_bucket = int(args.preload_obj)
@ -64,18 +59,9 @@ def main():
print(f"Create buckets: {buckets_count}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
if not 0 <= int(args.buckets_versioned) <= 100:
print(f"Percent of versioned buckets must be between 0 and 100: got {args.buckets_versioned}")
if not ignore_errors:
sys.exit(ERROR_WRONG_PERCENTAGE)
if args.versioning == "True":
versioning_per_bucket = [True] * buckets_count
else:
num_versioned_buckets = int((int(args.buckets_versioned) / 100) * buckets_count)
versioning_per_bucket = [True] * num_versioned_buckets + [False] * (buckets_count - num_versioned_buckets)
buckets_runs = [executor.submit(create_bucket, endpoint, versioning_per_bucket[i], location, args.acl, no_verify_ssl)
for i, endpoint, location in
zip(range(buckets_count), cycle(endpoints), cycle(args.location))]
buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning, args.location, args.acl, no_verify_ssl)
for _, endpoint in
zip(range(buckets_count), cycle(endpoints))]
for run in buckets_runs:
bucket_name = run.result()

View file

@ -125,7 +125,7 @@ The tests will use all pre-created buckets for PUT operations and all pre-create
$ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 500 --location load-1-4
```
* '--location' - specify the name of the container policy (from the policy.json file). It's important to run 'aws configure' each time the policy file is changed, to pick up the latest policies.
* '--buckets_versioned' - specify the percentage of versioned buckets from the total number of created buckets. Default is 0
3. Execute scenario with options:
```shell
@ -138,8 +138,6 @@ Options (in addition to the common options):
* `DELETE_AGE` - age of object in seconds before which it can not be deleted. This parameter can be used to control how many objects we have in the system under load.
* `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations.
* `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
* `OBJ_NAME_LENGTH` - if specified, the object name will be generated with the specified number of ASCII characters.
* `DIR_HEIGHT`, `DIR_WIDTH` - if both specified, object name will consist of `DIR_HEIGHT` directories, each of which can have `DIR_WIDTH` subdirectories, for example for `DIR_HEIGHT = 3, DIR_WIDTH = 100`, object names will be `/dir{1...100}/dir{1...100}/dir{1...100}/{uuid || OBJ_NAME}`
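
For reference, a brief sketch of how these variables combine into an object key; it mirrors the `generateS3Key` helper (`libs/keygen.js`) removed elsewhere in this diff, which remains the authoritative version, and the import path is assumed to match that helper.

```js
import { uuidv4 } from './k6-utils-1.4.0.js';

// DIR_HEIGHT nested directories, each index drawn from 1..DIR_WIDTH,
// followed by OBJ_NAME, or a random name of OBJ_NAME_LENGTH ASCII
// characters, or a UUID.
export function generateKey() {
  const width = parseInt(__ENV.DIR_WIDTH || '0');
  const height = parseInt(__ENV.DIR_HEIGHT || '0');
  let key = '';
  if (width > 0 && height > 0) {
    for (let i = 0; i < height; i++) {
      key += 'dir' + (Math.floor(Math.random() * width) + 1) + '/';
    }
  }
  if (__ENV.OBJ_NAME) {
    return key + __ENV.OBJ_NAME;
  }
  const length = parseInt(__ENV.OBJ_NAME_LENGTH || '0');
  if (length > 0) {
    const letters = 'abcdefghijklmnopqrstuvwxyz0123456789';
    let name = '';
    for (let i = 0; i < length; i++) {
      name += letters.charAt(Math.floor(Math.random() * letters.length));
    }
    return key + name;
  }
  return key + uuidv4();
}
```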
## S3 Multipart

View file

@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {newGenerator} from './libs/datagen.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
parseEnv();
@ -132,10 +132,6 @@ export function setup() {
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
if (delete_vu_count > 0){
obj_to_delete_selector.sync.add(delete_vu_count)
}
}
export function teardown(data) {
@ -159,7 +155,7 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
@ -208,8 +204,6 @@ export function obj_delete() {
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
if (obj_to_delete_exit_on_null) {
obj_to_delete_selector.sync.done()
obj_to_delete_selector.sync.wait()
exec.test.abort("No more objects to select");
}
return;

View file

@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
parseEnv();
@ -177,7 +177,7 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();

View file

@ -1,233 +0,0 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
parseEnv();
const obj_list = new SharedArray(
'obj_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
const bucket_list = new SharedArray(
'bucket_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).buckets; });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl : no_verify_ssl
}
// Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const s3_client = s3.connect(s3_endpoint, connection_args);
const log = logging.new().withField('endpoint', s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : read_age,
})
}
const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor : 'constant-vus',
vus : write_vu_count,
duration : `${duration}s`,
exec : 'obj_write',
gracefulStop : '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor : 'constant-vus',
vus : read_vu_count,
duration : `${duration}s`,
exec : 'obj_read',
gracefulStop : '5s',
};
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled ) {
obj_to_delete_exit_on_null = (write_vu_count == 0) && (read_vu_count == 0)
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
obj_to_delete_selector =
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'read',
age : delete_age,
});
}
const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
if (!obj_to_delete_selector) {
throw 'Positive DELETE worker number without a proper object selector';
}
scenarios.delete = {
executor : 'constant-vus',
vus : delete_vu_count,
duration : `${duration}s`,
exec : 'obj_delete',
gracefulStop : '5s',
};
}
export const options = {
scenarios,
setupTimeout : '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout' : textSummary(data, {indent : ' ', enableColors : false}),
[summary_json] : JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
log.withFields({bucket : bucket, key : key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj ) {
return;
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, status: obj.status, op: `READ`})
.error(resp.error);
} else {
obj_registry.setObjectStatus(obj.id, obj.status, 'read');
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket : obj.bucket, key : obj.object}).error(resp.error);
} else {
obj_registry.setObjectStatus(obj.id, obj.status, 'read');
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
delete_object(obj)
}
export function delete_object(obj) {
if (!obj) {
if (obj_to_delete_exit_on_null) {
exec.test.abort("No more objects to select");
}
return;
}
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, op : 'DELETE'})
.error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}

View file

@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
parseEnv();
@ -101,7 +101,7 @@ export function obj_write_multipart() {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();

View file

@ -5,7 +5,6 @@ import registry from 'k6/x/frostfs/registry';
import s3local from 'k6/x/frostfs/s3local';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
@ -132,7 +131,7 @@ export function handleSummary(data) {
}
export function obj_write() {
const key = generateS3Key();
const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();

View file

@ -44,8 +44,9 @@ if (__ENV.GRPC_ENDPOINTS) {
grpc_client = native.connect(
grpcEndpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 0,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 0,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' :
false,
'');
}
// Connect to random S3 endpoint