Compare commits


20 commits

Author SHA1 Message Date
f5df03c718 [#173] s3: Fix missing import
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-10-28 19:06:57 +03:00
1c7a3b3b6c [#173] s3: Support variable key length
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-10-28 18:57:51 +03:00
e0cbc3b763 [#124] s3: Allow to specify directory height and width
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-10-25 12:20:15 +03:00
54f99dac1d [#172] Update README.md
Add info about `xk6-registry import` to README.md.

Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-10-08 20:27:17 +03:00
591f8af161 [#172] cli: Fix registry importer usage description
`status` flag is currently unsupported by `xk6-registry import`.

Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-10-08 20:27:01 +03:00
c2b8944af6 [#171] Makefile: add target to install golangci-lint
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-10-02 16:48:45 +03:00
a47bf149d8 [#161] go.mod: Bump go version to 1.22
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-10-02 12:24:19 +00:00
bcbd0db25f [#169] Fix broken logo link in README
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-09-13 15:17:51 +03:00
17bbbe53e6 [#168] Update obsolete URLs
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-09-11 14:10:53 +03:00
bede693470 [#153] selector: Add VU synchronization in 'Oneshot' mode
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2024-09-10 11:56:40 +03:00
f539da7d89 [#166] preset: Add missing container_creation_retry parameter
Signed-off-by: Liza <e.chichindaeva@yadro.com>
2024-08-30 16:39:58 +03:00
6d3ecb6528 [#154] Add registry import cli utility
* Currently, objects created in preset are never deleted:
  k6 deletes only objects from the registry, and if no
  registry file is provided, the k6 delete load fails.
* Added a CLI utility to import objects created in preset
  into the registry so k6 can delete them normally.

Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-08-23 13:41:01 +03:00
75f670b392 [#159] preset: Add optional max number of retries to create a container instead of hard-coded number 20
Signed-off-by: s.makhov <s.makhov@yadro.com>
2024-08-02 12:22:52 +03:00
9b9db46a07 [#152] Allow to set mix of policies for containers and buckets
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-07-02 20:45:31 +03:00
335c45c578 [#149] selector: Add read timeout
If there are no objects for 10 seconds, return nil.
This prevents a VU iteration from hanging when no
objects have been pushed to the registry.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-06-24 13:59:12 +03:00
e7d4dd404a [#150] scenarios: Use SelectorAwaiting for read and delete load in s3_dar.js, make delete_age optional
Signed-off-by: m.malygina <m.malygina@yadro.com>
2024-06-21 15:52:57 +03:00
0a9aeab47c [#150] In case we are running both read and delete load SelectorAwaiting
Signed-off-by: m.malygina <m.malygina@yadro.com>
2024-06-21 10:55:18 +03:00
3bc1229062 [#146] native: Add NetworkInfo cache
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-06-17 09:32:20 +03:00
e92ce668a8 [#145] scenarios: Format js files with clang
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-06-14 11:26:01 +03:00
6d1e7eb49e [#145] native: Allow to specify max_obj_size
For locally prepared objects it is now possible to
specify the cut size.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-06-14 10:57:30 +03:00
30 changed files with 512 additions and 191 deletions

View file

@@ -13,7 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.21'
go-version: '1.22'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v2

View file

@@ -11,20 +11,21 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.23'
cache: true
- name: golangci-lint
uses: https://github.com/golangci/golangci-lint-action@v3
with:
version: latest
- name: Install linters
run: make lint-install
- name: Run linters
run: make lint
tests:
name: Tests
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.21', '1.22' ]
go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@@ -47,7 +48,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.21'
go-version: '1.22'
cache: true
- name: Run tests

View file

@@ -3,8 +3,8 @@
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://github.com/TrueCloudLab/xk6-frostfs/issues) and
[pull requests](https://github.com/TrueCloudLab/xk6-frostfs/pulls) for existing
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/issues) and
[pull requests](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
@@ -28,18 +28,18 @@ send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:
### Set up your GitHub Repository
Fork [xk6-frostfs upstream](https://github.com/TrueCloudLab/xk6-frostfs/fork) source
Fork [xk6-frostfs upstream](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/fork) source
repository to your own personal repository. Copy the URL of your fork (you will
need it for the `git clone` command below).
```sh
$ git clone https://github.com/TrueCloudLab/xk6-frostfs
$ git clone https://git.frostfs.info/TrueCloudLab/xk6-frostfs
```
### Set up git remote as ``upstream``
```sh
$ cd xk6-frostfs
$ git remote add upstream https://github.com/TrueCloudLab/xk6-frostfs
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/xk6-frostfs
$ git fetch upstream
$ git merge upstream/master
...

View file

@@ -3,10 +3,15 @@
# Common variables
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.19
LINT_VERSION ?= 1.49.0
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
BINDIR = bin
OUTPUT_LINT_DIR ?= $(abspath $(BINDIR))/linters
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache
# Binaries to build
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
BINS = $(addprefix $(BINDIR)/, $(CMDS))
@@ -64,7 +69,22 @@ format:
# Run linters
lint:
@golangci-lint --timeout=5m run
@if [ ! -d "$(LINT_DIR)" ]; then \
make lint-install; \
fi
$(LINT_DIR)/golangci-lint run --timeout=5m
# Install linters
lint-install:
@rm -rf $(OUTPUT_LINT_DIR)
@mkdir -p $(OUTPUT_LINT_DIR)
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters in Docker
docker/lint:

View file

@@ -1,5 +1,5 @@
<p align="center">
<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
</p>
<p align="center">
<a href="https://go.k6.io/k6">k6</a> extension to test and benchmark FrostFS related protocols.
@@ -48,10 +48,11 @@ Create native client with `connect` method. Arguments:
- dial timeout in seconds (0 for the default value)
- stream timeout in seconds (0 for the default value)
- generate object header on the client side (for big object - split locally too)
- max size for generated object header on the client side (for big object - the size that the object is split into)
```js
import native from 'k6/x/frostfs/native';
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false)
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
```
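For instance, a sketch that enables client-side preparation and caps generated objects at 64 MiB (the endpoint and limit here are illustrative; judging by the changes below, the limit only takes effect when local preparation is enabled):
```js
import native from 'k6/x/frostfs/native';
// Prepare objects locally and split them into parts of at most 64 MiB.
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, true, 64 * 1024 * 1024)
```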
### Methods
@@ -185,6 +186,25 @@ Flags:
-v, --version version for registry-exporter
```
## Import pregen into registry db
You can import pregenerated JSON files into a registry Bolt DB using `frostfs-xk6-registry import`. Usage examples are available in the help:
```shell
$ ./bin/frostfs-xk6-registry import -h
Import objects into registry from pregenerated files
Usage:
xk6-registry import [flags]
Examples:
xk6-registry import registry.bolt preset.json
xk6-registry import registry.bolt preset.json another_preset.json
Flags:
-h, --help help for import
```
# License
- [GNU General Public License v3.0](LICENSE)

View file

@@ -0,0 +1,55 @@
package importer
import (
"encoding/json"
"os"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
)
type PreGenObj struct {
Bucket string `json:"bucket"`
Object string `json:"object"`
Container string `json:"container"`
}
type PreGenerateInfo struct {
Buckets []string `json:"buckets"`
Containers []string `json:"containers"`
Objects []PreGenObj `json:"objects"`
ObjSize string `json:"obj_size"`
}
// ImportJSONPreGen writes objects from pregenerated JSON file
// to the registry.
// Note that ImportJSONPreGen does not check whether an object
// already exists in the registry, so importing the same file twice
// leaves two entries for the same object.
func ImportJSONPreGen(o *registry.ObjRegistry, filename string) error {
f, err := os.ReadFile(filename)
if err != nil {
return err
}
var pregenInfo PreGenerateInfo
err = json.Unmarshal(f, &pregenInfo)
if err != nil {
return err
}
// AddObject uses DB.Batch to combine concurrent Batch calls
// into a single Bolt transaction. DB.Batch is limited by
// DB.MaxBatchDelay which may affect performance.
for _, obj := range pregenInfo.Objects {
if obj.Bucket != "" {
err = o.AddObject("", "", obj.Bucket, obj.Object, "")
} else {
err = o.AddObject(obj.Container, obj.Object, "", "", "")
}
if err != nil {
return err
}
}
return nil
}

View file

@@ -0,0 +1,27 @@
package importer
import (
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
"github.com/spf13/cobra"
)
// Cmd represents the import command.
var Cmd = &cobra.Command{
Use: "import",
Short: "Import objects into registry",
Long: "Import objects into registry from pregenerated files",
Example: `xk6-registry import registry.bolt preset.json
xk6-registry import registry.bolt preset.json another_preset.json`,
RunE: runCmd,
Args: cobra.MinimumNArgs(2),
}
func runCmd(cmd *cobra.Command, args []string) error {
objRegistry := registry.NewObjRegistry(cmd.Context(), args[0])
for i := 1; i < len(args); i++ {
if err := ImportJSONPreGen(objRegistry, args[i]); err != nil {
return err
}
}
return nil
}

18
cmd/xk6-registry/main.go Normal file
View file

@@ -0,0 +1,18 @@
package main
import (
"context"
"os"
"os/signal"
"syscall"
)
func main() {
ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
if cmd, err := rootCmd.ExecuteContextC(ctx); err != nil {
cmd.PrintErrln("Error:", err.Error())
cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
os.Exit(1)
}
}

33
cmd/xk6-registry/root.go Normal file
View file

@@ -0,0 +1,33 @@
package main
import (
"runtime"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/cmd/xk6-registry/importer"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/version"
"github.com/spf13/cobra"
)
var rootCmd = &cobra.Command{
Use: "xk6-registry",
Version: version.Version,
Short: "Command Line Tool to work with Registry",
Long: `Registry provides tools to work with object registry for xk6.
It contains command for importing objects in registry from preset`,
SilenceErrors: true,
SilenceUsage: true,
Run: rootCmdRun,
}
func init() {
cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
rootCmd.SetVersionTemplate(`FrostFS xk6-registry
{{printf "Version: %s" .Version }}
GoVersion: {{ runtimeVersion }}
`)
rootCmd.AddCommand(importer.Cmd)
}
func rootCmdRun(cmd *cobra.Command, _ []string) {
_ = cmd.Usage()
}

View file

@@ -3,11 +3,11 @@ import { fail } from "k6";
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b');
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false)
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false, 0)
export const options = {
stages: [
{duration: '30s', target: 10},
{ duration: '30s', target: 10 },
],
};
@@ -24,7 +24,7 @@ export function setup() {
fail(res.error)
}
console.info("created container", res.container_id)
return {container_id: res.container_id}
return { container_id: res.container_id }
}
export default function (data) {

View file

@@ -3,7 +3,7 @@ import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b');
const container = "AjSxSNNXbJUDPqqKYm1VbFVDGCakbpUNH8aGjPmGAH3B"
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false)
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
const frostfs_obj = frostfs_cli.onsite(container, payload)
export const options = {
@@ -14,11 +14,11 @@ export const options = {
export default function () {
let headers = {
'unique_header': uuidv4()
'unique_header': uuidv4()
}
let resp = frostfs_obj.put(headers)
if (resp.success) {
frostfs_cli.get(container, resp.object_id)
frostfs_cli.get(container, resp.object_id)
} else {
console.log(resp.error)
}

2
go.mod
View file

@@ -1,6 +1,6 @@
module git.frostfs.info/TrueCloudLab/xk6-frostfs
go 1.21
go 1.22
require (
git.frostfs.info/TrueCloudLab/frostfs-node v0.38.3-0.20240502170333-ec2873caa7c6

58
internal/native/cache.go Normal file
View file

@@ -0,0 +1,58 @@
package native
import (
"context"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
const networkCacheTTL = time.Minute
var networkInfoCache = &networkInfoCacheT{}
type networkInfoCacheT struct {
guard sync.RWMutex
current *netmap.NetworkInfo
fetchTS time.Time
}
func (c *networkInfoCacheT) getOrFetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
if v := c.get(); v != nil {
return v, nil
}
return c.fetch(ctx, cli)
}
func (c *networkInfoCacheT) get() *netmap.NetworkInfo {
c.guard.RLock()
defer c.guard.RUnlock()
if c.current == nil || time.Since(c.fetchTS) > networkCacheTTL {
return nil
}
return c.current
}
func (c *networkInfoCacheT) fetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
c.guard.Lock()
defer c.guard.Unlock()
if time.Since(c.fetchTS) <= networkCacheTTL {
return c.current, nil
}
res, err := cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
v := res.Info()
c.current = &v
c.fetchTS = time.Now()
return c.current, nil
}

View file

@@ -35,6 +35,7 @@ type (
tok session.Object
cli *client.Client
prepareLocally bool
maxObjSize uint64
}
PutResponse struct {
@@ -71,6 +72,7 @@ type (
hdr object.Object
payload []byte
prepareLocally bool
maxObjSize uint64
}
)
@@ -103,7 +105,7 @@ func (c *Client) Put(containerID string, headers map[string]string, payload data
o.SetOwnerID(owner)
o.SetAttributes(attrs...)
resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize)
resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize, c.maxObjSize)
if err != nil {
return PutResponse{Success: false, Error: err.Error()}
}
@@ -373,6 +375,7 @@ func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObj
hdr: *obj,
payload: data,
prepareLocally: c.prepareLocally,
maxObjSize: c.maxObjSize,
}
}
@@ -398,7 +401,7 @@ func (p PreparedObject) Put(headers map[string]string) PutResponse {
return PutResponse{Success: false, Error: err.Error()}
}
_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0)
_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0, p.maxObjSize)
if err != nil {
return PutResponse{Success: false, Error: err.Error()}
}
@@ -413,7 +416,7 @@ func (s epochSource) CurrentEpoch() uint64 {
}
func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Object,
hdr *object.Object, payload datagen.Payload, chunkSize int,
hdr *object.Object, payload datagen.Payload, chunkSize int, maxObjSize uint64,
) (*client.ResObjectPut, error) {
bufSize := defaultBufferSize
if chunkSize > 0 {
@@ -434,13 +437,16 @@ func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Ob
prm.MaxChunkLength = chunkSize
}
if prepareLocally {
res, err := cli.NetworkInfo(vu.Context(), client.PrmNetworkInfo{})
ni, err := networkInfoCache.getOrFetch(vu.Context(), cli)
if err != nil {
return nil, err
}
prm.MaxSize = res.Info().MaxObjectSize()
prm.EpochSource = epochSource(res.Info().CurrentEpoch())
prm.MaxSize = ni.MaxObjectSize()
prm.EpochSource = epochSource(ni.CurrentEpoch())
prm.WithoutHomomorphHash = true
if maxObjSize > 0 {
prm.MaxSize = maxObjSize
}
}
objectWriter, err := cli.ObjectPutInit(vu.Context(), prm)

View file

@@ -52,13 +52,17 @@ func (n *Native) Exports() modules.Exports {
return modules.Exports{Default: n}
}
func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool) (*Client, error) {
func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool, maxObjSize int) (*Client, error) {
var (
cli client.Client
pk *keys.PrivateKey
err error
)
if maxObjSize < 0 {
return nil, fmt.Errorf("max object size value must be positive")
}
pk, err = keys.NewPrivateKey()
if len(hexPrivateKey) != 0 {
pk, err = keys.NewPrivateKeyFromHex(hexPrivateKey)
@@ -114,6 +118,16 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
tok.SetAuthKey(&key)
tok.SetExp(exp)
if prepareLocally && maxObjSize > 0 {
res, err := cli.NetworkInfo(n.vu.Context(), client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
if uint64(maxObjSize) > res.Info().MaxObjectSize() {
return nil, fmt.Errorf("max object size must be not greater than %d bytes", res.Info().MaxObjectSize())
}
}
// register metrics
objPutSuccess, _ = stats.Registry.NewMetric("frostfs_obj_put_success", metrics.Counter)
@@ -140,5 +154,6 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
tok: tok,
cli: &cli,
prepareLocally: prepareLocally,
maxObjSize: uint64(maxObjSize),
}, nil
}

View file

@@ -3,12 +3,15 @@ package registry
import (
"context"
"fmt"
"sync"
"time"
"github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt"
)
const nextObjectTimeout = 10 * time.Second
type ObjFilter struct {
Status string
Age int
@@ -21,6 +24,8 @@ type ObjSelector struct {
filter *ObjFilter
cacheSize int
kind SelectorKind
// Sync synchronizes VUs used for deletion.
Sync sync.WaitGroup
}
// objectSelectCache is the default maximum size of a batch to select from DB.
@@ -57,7 +62,16 @@ func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind,
// - underlying registry context is done, nil objects will be returned on the
// currently blocked and every further NextObject calls.
func (o *ObjSelector) NextObject() *ObjectInfo {
return <-o.objChan
if o.kind == SelectorOneshot {
return <-o.objChan
}
select {
case <-time.After(nextObjectTimeout):
return nil
case obj := <-o.objChan:
return obj
}
}
// Count returns total number of objects that match filter of the selector.

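From the scenario side, this read timeout surfaces as a null result from `nextObject` instead of a blocked VU. A minimal consumer sketch (the registry path and filter are illustrative):
```js
import registry from 'k6/x/frostfs/registry';

const selector = registry.getSelector('registry.bolt', 'obj_to_read', 0, { status: 'created' });

export default function () {
  // With the timeout above, nextObject() returns null if no matching
  // object appears within 10 seconds, so the iteration ends cleanly.
  const obj = selector.nextObject();
  if (!obj) {
    return; // nothing selectable right now
  }
  // ... read obj.c_id / obj.o_id here ...
}
```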
View file

@@ -1,25 +1,25 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import { sleep } from 'k6';
import { SharedArray } from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import native from 'k6/x/frostfs/native';
import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
import { newGenerator } from './libs/datagen.js';
import { parseEnv } from './libs/env-parser.js';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray(
'obj_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
'obj_list',
function () { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
const container_list = new SharedArray(
'container_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).containers; });
'container_list',
function () { return JSON.parse(open(__ENV.PREGEN_JSON)).containers; });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
@@ -27,17 +27,17 @@ const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
// Select random gRPC endpoint for current VU
const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpc_endpoint =
grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
const grpc_client = native.connect(
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true'
: false);
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
const log = logging.new().withField('endpoint', grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
@@ -49,11 +49,11 @@ const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : read_age,
})
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
}
const scenarios = {};
@@ -63,11 +63,11 @@ const write_grpc_chunk_size = 1024 * parseInt(__ENV.GRPC_CHUNK_SIZE || '0')
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor : 'constant-vus',
vus : write_vu_count,
duration : `${duration}s`,
exec : 'obj_write',
gracefulStop : '5s',
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
};
}
@@ -78,24 +78,24 @@ if (registry_enabled && delete_age) {
obj_to_delete_exit_on_null = write_vu_count == 0;
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
: registry.getSelector;
obj_to_delete_selector =
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : delete_age,
});
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor : 'constant-vus',
vus : read_vu_count,
duration : `${duration}s`,
exec : 'obj_read',
gracefulStop : '5s',
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
};
}
@@ -103,21 +103,21 @@ const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
if (!obj_to_delete_selector) {
throw new Error(
'Positive DELETE worker number without a proper object selector');
'Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
executor : 'constant-vus',
vus : delete_vu_count,
duration : `${duration}s`,
exec : 'obj_delete',
gracefulStop : '5s',
executor: 'constant-vus',
vus: delete_vu_count,
duration: `${duration}s`,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout : '5s',
setupTimeout: '5s',
};
export function setup() {
@@ -133,7 +133,7 @@ export function setup() {
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
@@ -142,13 +142,13 @@ export function teardown(data) {
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout' : textSummary(data, {indent : ' ', enableColors : false}),
[summary_json] : JSON.stringify(data),
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
@@ -157,13 +157,13 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE);
}
const headers = {unique_header : uuidv4()};
const headers = { unique_header: uuidv4() };
const container =
container_list[Math.floor(Math.random() * container_list.length)];
container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload();
const resp =
grpc_client.put(container, headers, payload, write_grpc_chunk_size);
grpc_client.put(container, headers, payload, write_grpc_chunk_size);
if (!resp.success) {
log.withField('cid', container).error(resp.error);
return;
@@ -186,7 +186,7 @@ export function obj_read() {
}
const resp = grpc_client.get(obj.c_id, obj.o_id)
if (!resp.success) {
log.withFields({cid : obj.c_id, oid : obj.o_id}).error(resp.error);
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
}
return
}
@@ -194,7 +194,7 @@ export function obj_read() {
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({cid : obj.container, oid : obj.object}).error(resp.error);
log.withFields({ cid: obj.container, oid: obj.object }).error(resp.error);
}
}
@@ -214,7 +214,7 @@ export function obj_delete() {
const resp = grpc_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({cid : obj.c_id, oid : obj.o_id}).error(resp.error);
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
return;
}

View file

@@ -1,22 +1,22 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import { sleep } from 'k6';
import { SharedArray } from 'k6/data';
import logging from 'k6/x/frostfs/logging';
import native from 'k6/x/frostfs/native';
import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
import { newGenerator } from './libs/datagen.js';
import { parseEnv } from './libs/env-parser.js';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function() {
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray('container_list', function() {
const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
@@ -26,17 +26,17 @@ const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
// Select random gRPC endpoint for current VU
const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpc_endpoint =
grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
const grpc_client = native.connect(
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' :
false);
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
const log = logging.new().withField('endpoint', grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
@@ -48,22 +48,22 @@ const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
}
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
}
const scenarios = {};
@@ -109,7 +109,7 @@ const delete_rate = parseInt(__ENV.DELETE_RATE || '0');
if (delete_rate > 0) {
if (!obj_to_delete_selector) {
throw new Error(
'Positive DELETE worker number without a proper object selector');
'Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
@@ -131,7 +131,7 @@ export const options = {
export function setup() {
const total_pre_allocated_vu_count =
pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus
console.log(`Pregenerated containers: ${container_list.length}`);
@@ -152,7 +152,7 @@ export function setup() {
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
@@ -161,12 +161,12 @@ export function teardown(data) {
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
@@ -176,13 +176,13 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE);
}
const headers = {unique_header: uuidv4()};
const headers = { unique_header: uuidv4() };
const container =
container_list[Math.floor(Math.random() * container_list.length)];
container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload();
const resp =
grpc_client.put(container, headers, payload, write_grpc_chunk_size);
grpc_client.put(container, headers, payload, write_grpc_chunk_size);
if (!resp.success) {
log.withField('cid', container).error(resp.error);
return;
@@ -205,7 +205,7 @@ export function obj_read() {
}
const resp = grpc_client.get(obj.c_id, obj.o_id)
if (!resp.success) {
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
}
return
}
@@ -213,7 +213,7 @@ export function obj_read() {
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
log.withFields({ cid: obj.container, oid: obj.object }).error(resp.error);
}
}
@@ -230,7 +230,7 @@ export function obj_delete() {
const resp = grpc_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
return;
}

34
scenarios/libs/keygen.js Normal file
View file

@@ -0,0 +1,34 @@
import { uuidv4 } from './k6-utils-1.4.0.js';
export function generateS3Key() {
let width = parseInt(__ENV.DIR_WIDTH || '0');
let height = parseInt(__ENV.DIR_HEIGHT || '0');
let key = ''
if (width > 0 && height > 0) {
for (let index = 0; index < height; index++) {
const w = Math.floor(Math.random() * width) + 1;
key = key + 'dir' + w + '/';
}
}
key += objName();
return key;
}
const asciiLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
function objName() {
if (__ENV.OBJ_NAME) {
return __ENV.OBJ_NAME;
}
const length = parseInt(__ENV.OBJ_NAME_LENGTH || '0');
if (length > 0) {
let name = "";
for (let i = 0; i < length; i++) {
name += asciiLetters.charAt(Math.floor(Math.random() * asciiLetters.length));
}
return name;
}
return uuidv4();
}
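A sketch of how a scenario might use this helper (the environment values in the comment are illustrative):
```js
import { generateS3Key } from './libs/keygen.js';

export default function () {
  // With DIR_HEIGHT=2, DIR_WIDTH=3 and OBJ_NAME_LENGTH=8 a key may look
  // like "dir2/dir1/aBcDeFgH"; with no variables set it is a plain uuid.
  const key = generateS3Key();
  console.log(key);
}
```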

View file

@@ -4,15 +4,16 @@ from helpers.cmd import execute_cmd, log
def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
configuration = ""
if location:
location = f"--create-bucket-configuration 'LocationConstraint={location}'"
configuration = f"--create-bucket-configuration 'LocationConstraint={location}'"
if acl:
acl = f"--acl {acl}"
bucket_name = str(uuid.uuid4())
no_verify_ssl_str = "--no-verify-ssl" if no_verify_ssl else ""
cmd_line = f"aws {no_verify_ssl_str} s3api create-bucket --bucket {bucket_name} " \
f"--endpoint {endpoint} {location} {acl} "
f"--endpoint {endpoint} {configuration} {acl} "
cmd_line_ver = f"aws {no_verify_ssl_str} s3api put-bucket-versioning --bucket {bucket_name} " \
f"--versioning-configuration Status=Enabled --endpoint {endpoint} {acl} "
@@ -33,7 +34,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
else:
log(f"Bucket versioning has been applied for bucket {bucket_name}", endpoint)
log(f"Created bucket: {bucket_name}", endpoint)
log(f"Created bucket: {bucket_name} ({location})", endpoint)
return bucket_name

View file

@@ -1,8 +1,8 @@
import re
from helpers.cmd import execute_cmd, log
def create_container(endpoint, policy, wallet_path, config, acl, local=False, depth=0):
if depth > 20:
def create_container(endpoint, policy, container_creation_retry, wallet_path, config, acl, local=False, retry=0):
if retry > int(container_creation_retry):
raise ValueError(f"unable to create container: too many unsuccessful attempts")
if wallet_path:
@@ -34,7 +34,7 @@ def create_container(endpoint, policy, wallet_path, config, acl, local=False, de
raise ValueError(f"no CID was parsed from command output:\t{fst_str}")
cid = splitted[1]
log(f"Created container {cid}", endpoint)
log(f"Created container: {cid} ({policy})", endpoint)
if not local:
return cid
@@ -88,7 +88,7 @@ def create_container(endpoint, policy, wallet_path, config, acl, local=False, de
return cid
log(f"Created container {cid} is not stored on {endpoint}, creating another one...", endpoint)
return create_container(endpoint, policy, wallet_path, config, acl, local, depth + 1)
return create_container(endpoint, policy, container_creation_retry, wallet_path, config, acl, local, retry + 1)
def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config):

View file

@@ -15,18 +15,20 @@ from helpers.frostfs_cli import create_container, upload_object
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
MAX_WORKERS = 50
DEFAULT_POLICY = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb')
parser.add_argument('--containers', help='Number of containers to create')
parser.add_argument('--retry', default=20, help='Maximum number of retries to create a container')
parser.add_argument('--out', help='JSON file with output')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects')
parser.add_argument('--wallet', help='Wallet file path')
parser.add_argument('--config', help='Wallet config file path')
parser.add_argument(
"--policy",
help="Container placement policy",
default="REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
help=f"Container placement policy. Default is {DEFAULT_POLICY}",
action="append"
)
parser.add_argument('--endpoint', help='Nodes addresses separated by comma.')
parser.add_argument('--update', help='Save existed containers')
@@ -46,7 +48,10 @@ def main():
objects_list = []
endpoints = args.endpoint.split(',')
if not args.policy:
args.policy = [DEFAULT_POLICY]
container_creation_retry = args.retry
wallet = args.wallet
wallet_config = args.config
workers = int(args.workers)
@@ -63,9 +68,9 @@
containers_count = int(args.containers)
print(f"Create containers: {containers_count}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
containers_runs = [executor.submit(create_container, endpoint, args.policy, wallet, wallet_config, args.acl, args.local)
for _, endpoint in
zip(range(containers_count), cycle(endpoints))]
containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, args.acl, args.local)
for _, endpoint, policy in
zip(range(containers_count), cycle(endpoints), cycle(args.policy))]
for run in containers_runs:
container_id = run.result()

View file

@@ -11,6 +11,11 @@ from concurrent.futures import ProcessPoolExecutor
from helpers.cmd import random_payload
from helpers.aws_cli import create_bucket, upload_object
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
MAX_WORKERS = 50
DEFAULT_LOCATION = ""
parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb.')
@@ -20,7 +25,7 @@ parser.add_argument('--preload_obj', help='Number of pre-loaded objects.')
parser.add_argument('--endpoint', help='S3 Gateways addresses separated by comma.')
parser.add_argument('--update', help='True/False, False by default. Save existed buckets from target file (--out). '
'New buckets will not be created.')
parser.add_argument('--location', help='AWS location. Will be empty, if has not be declared.', default="")
parser.add_argument('--location', help=f'AWS location constraint. Default is "{DEFAULT_LOCATION}"', action="append")
parser.add_argument('--versioning', help='True/False, False by default.')
parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
@@ -32,10 +37,6 @@ parser.add_argument('--acl', help='Bucket ACL. Default is private. Expected valu
args = parser.parse_args()
print(args)
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
MAX_WORKERS = 50
def main():
buckets = []
objects_list = []
@@ -43,6 +44,8 @@ def main():
no_verify_ssl = args.no_verify_ssl
endpoints = args.endpoint.split(',')
if not args.location:
args.location = [DEFAULT_LOCATION]
workers = int(args.workers)
objects_per_bucket = int(args.preload_obj)
@@ -59,9 +62,9 @@
print(f"Create buckets: {buckets_count}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning, args.location, args.acl, no_verify_ssl)
for _, endpoint in
zip(range(buckets_count), cycle(endpoints))]
buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning, location, args.acl, no_verify_ssl)
for _, endpoint, location in
zip(range(buckets_count), cycle(endpoints), cycle(args.location))]
for run in buckets_runs:
bucket_name = run.result()

View file

@@ -138,6 +138,8 @@ Options (in addition to the common options):
* `DELETE_AGE` - age of object in seconds before which it can not be deleted. This parameter can be used to control how many objects we have in the system under load.
* `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations.
* `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
* `OBJ_NAME_LENGTH` - if specified, the object name is generated as a random ASCII string of the given length.
* `DIR_HEIGHT`, `DIR_WIDTH` - if both are specified, the object name will consist of `DIR_HEIGHT` nested directories, each of which can have `DIR_WIDTH` variants; for example, for `DIR_HEIGHT = 3, DIR_WIDTH = 100`, object names will be `/dir{1...100}/dir{1...100}/dir{1...100}/{uuid || OBJ_NAME}`
## S3 Multipart

View file

@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {newGenerator} from './libs/datagen.js';
import {generateS3Key} from './libs/keygen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
import {newGenerator} from './libs/datagen.js';
parseEnv();
@@ -132,6 +132,10 @@ export function setup() {
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
if (delete_vu_count > 0){
obj_to_delete_selector.sync.add(delete_vu_count)
}
}
export function teardown(data) {
@@ -155,7 +159,7 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE);
}
const key = __ENV.OBJ_NAME || uuidv4();
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
@@ -204,6 +208,8 @@ export function obj_delete() {
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
if (obj_to_delete_exit_on_null) {
obj_to_delete_selector.sync.done()
obj_to_delete_selector.sync.wait()
exec.test.abort("No more objects to select");
}
return;

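Read together, these hunks form a simple barrier so that delete VUs abort in lockstep once the oneshot selector runs dry. A condensed sketch of the resulting flow, assuming the selector and VU count shown above (error handling elided):
```js
import exec from 'k6/execution';
import registry from 'k6/x/frostfs/registry';

// Illustrative: one oneshot selector shared by all delete VUs.
const delete_vu_count = parseInt(__ENV.DELETERS || '0');
const obj_to_delete_selector = registry.getOneshotSelector(
    __ENV.REGISTRY_FILE, 'obj_to_delete', 0, { status: 'created' });

export function setup() {
  // Every delete VU joins the barrier.
  obj_to_delete_selector.sync.add(delete_vu_count);
}

export function obj_delete() {
  const obj = obj_to_delete_selector.nextObject();
  if (!obj) {
    obj_to_delete_selector.sync.done(); // this VU has drained the selector
    obj_to_delete_selector.sync.wait(); // block until all delete VUs drain
    exec.test.abort("No more objects to select");
  }
  // ... delete obj.c_id / obj.o_id here ...
}
```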
View file

@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
parseEnv();
@@ -177,7 +177,7 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE);
}
const key = __ENV.OBJ_NAME || uuidv4();
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();

View file

@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
parseEnv();
@@ -71,11 +71,23 @@ if (write_vu_count > 0) {
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor : 'constant-vus',
vus : read_vu_count,
duration : `${duration}s`,
exec : 'obj_read',
gracefulStop : '5s',
};
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_exit_on_null = write_vu_count == 0;
if (registry_enabled ) {
obj_to_delete_exit_on_null = (write_vu_count == 0) && (read_vu_count == 0)
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
@@ -88,16 +100,7 @@ });
});
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor : 'constant-vus',
vus : read_vu_count,
duration : `${duration}s`,
exec : 'obj_read',
gracefulStop : '5s',
};
}
const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
@@ -156,7 +159,7 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE);
}
const key = __ENV.OBJ_NAME || uuidv4();
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();

View file

@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
parseEnv();
@@ -101,7 +101,7 @@ export function obj_write_multipart() {
sleep(__ENV.SLEEP_WRITE);
}
const key = __ENV.OBJ_NAME || uuidv4();
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();

View file

@@ -5,6 +5,7 @@ import registry from 'k6/x/frostfs/registry';
import s3local from 'k6/x/frostfs/s3local';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
@@ -131,7 +132,7 @@ export function handleSummary(data) {
}
export function obj_write() {
const key = __ENV.OBJ_NAME || uuidv4();
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();

View file

@@ -1,13 +1,13 @@
import {sleep} from 'k6';
import {Counter} from 'k6/metrics';
import { sleep } from 'k6';
import { Counter } from 'k6/metrics';
import logging from 'k6/x/frostfs/logging';
import native from 'k6/x/frostfs/native';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { textSummary } from './libs/k6-summary-0.0.2.js';
parseEnv();
@@ -39,24 +39,23 @@ let grpc_client = undefined;
if (__ENV.GRPC_ENDPOINTS) {
const grpcEndpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpcEndpoint =
grpcEndpoints[Math.floor(Math.random() * grpcEndpoints.length)];
grpcEndpoints[Math.floor(Math.random() * grpcEndpoints.length)];
log = log.withField('endpoint', grpcEndpoint);
grpc_client = native.connect(
grpcEndpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 0,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 0,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' :
false,
'');
grpcEndpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 0,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 0,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
}
// Connect to random S3 endpoint
let s3_client = undefined;
if (__ENV.S3_ENDPOINTS) {
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {no_verify_ssl: no_verify_ssl};
const connection_args = { no_verify_ssl: no_verify_ssl };
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
log = log.withField('endpoint', s3_endpoint);
s3_client = s3.connect(s3_endpoint, connection_args);
}
@@ -65,10 +64,10 @@ if (__ENV.S3_ENDPOINTS) {
// execute as many iterations as there are objects. Each object will have 3
// retries to be verified
const obj_to_verify_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_verify',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
});
__ENV.REGISTRY_FILE, 'obj_to_verify',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
});
const obj_to_verify_count = obj_to_verify_selector.count();
// Execute at least one iteration (executor shared-iterations can't run 0
// iterations)
@@ -97,15 +96,15 @@ export function setup() {
// Populate counters with initial values
for (const [status, counter] of Object.entries(obj_counters)) {
const obj_selector = registry.getSelector(
__ENV.REGISTRY_FILE, status,
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {status});
__ENV.REGISTRY_FILE, status,
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, { status });
counter.add(obj_selector.count());
}
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
@@ -138,19 +137,19 @@ function verify_object_with_retries(obj, attempts) {
// ReferenceError: Cannot access a variable before initialization.
let lg = log;
if (obj.c_id && obj.o_id) {
lg = lg.withFields({cid: obj.c_id, oid: obj.o_id});
lg = lg.withFields({ cid: obj.c_id, oid: obj.o_id });
result = grpc_client.verifyHash(obj.c_id, obj.o_id, obj.payload_hash);
} else if (obj.s3_bucket && obj.s3_key) {
lg = lg.withFields({bucket: obj.s3_bucket, key: obj.s3_key});
lg = lg.withFields({ bucket: obj.s3_bucket, key: obj.s3_key });
result =
s3_client.verifyHash(obj.s3_bucket, obj.s3_key, obj.payload_hash);
s3_client.verifyHash(obj.s3_bucket, obj.s3_key, obj.payload_hash);
} else {
lg.withFields({
cid: obj.c_id,
oid: obj.o_id,
bucket: obj.s3_bucket,
key: obj.s3_key
}).warn(`Object cannot be verified with supported protocols`);
cid: obj.c_id,
oid: obj.o_id,
bucket: obj.s3_bucket,
key: obj.s3_key
}).warn(`Object cannot be verified with supported protocols`);
return 'skipped';
}