Compare commits


2 commits

025facee96 [#13] Allow to use english text in the payload
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-06-25 13:22:57 +03:00
c56bbf04d5 [#15] go.mod: Tidy
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-06-25 13:22:57 +03:00
71 changed files with 1962 additions and 3936 deletions


@@ -1,21 +0,0 @@
name: DCO action
on: [pull_request]
jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.22'
      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v2
        with:
          from: 'origin/${{ github.event.pull_request.base.ref }}'


@@ -1,56 +0,0 @@
name: Tests and linters
on: [pull_request]
jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'
          cache: true
      - name: Install linters
        run: make lint-install
      - name: Run linters
        run: make lint
  tests:
    name: Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'
          cache: true
      - name: Run tests
        run: make test
  tests-race:
    name: Tests with -race
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.22'
          cache: true
      - name: Run tests
        run: go test ./... -count=1 -race


(Image file changed: 5.5 KiB before, 5.5 KiB after)

.github/workflows/dco.yml (vendored, new file, 21 lines)

@@ -0,0 +1,21 @@
name: DCO check
on:
  pull_request:
    branches:
      - master
jobs:
  commits_check_job:
    runs-on: ubuntu-latest
    name: Commits Check
    steps:
      - name: Get PR Commits
        id: 'get-pr-commits'
        uses: tim-actions/get-pr-commits@master
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: DCO Check
        uses: tim-actions/dco@master
        with:
          commits: ${{ steps.get-pr-commits.outputs.commits }}

.github/workflows/go.yml (vendored, new file, 34 lines)

@@ -0,0 +1,34 @@
name: Tests
on:
  pull_request:
    branches:
      - master
    types: [opened, synchronize]
    paths-ignore:
      - '**/*.md'
  workflow_dispatch:
jobs:
  lint:
    name: Lint
    runs-on: ubuntu-20.04
    steps:
      - name: Check out code
        uses: actions/checkout@v2
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v2
        with:
          version: latest
          args: --timeout=2m
  tests:
    name: Tests
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        go_versions: [ '1.17', '1.18', '1.19' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

.gitignore (vendored, 3 lines changed)

@@ -1,6 +1,3 @@
 k6
 *.bolt
 presets
-bin
-# Preset script artifacts.
-__pycache__


@@ -3,8 +3,8 @@
 First, thank you for contributing! We love and encourage pull requests from
 everyone. Please follow the guidelines:
-- Check the open [issues](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/issues) and
-  [pull requests](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/pulls) for existing
+- Check the open [issues](https://github.com/TrueCloudLab/xk6-frostfs/issues) and
+  [pull requests](https://github.com/TrueCloudLab/xk6-frostfs/pulls) for existing
   discussions.
 - Open an issue first, to discuss a new feature or enhancement.
@@ -27,20 +27,19 @@ Start by forking the `xk6-frostfs` repository, make changes in a branch and then
 send a pull request. We encourage pull requests to discuss code changes. Here
 are the steps in details:
-### Set up your repository
-Fork [xk6-frostfs upstream](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/fork) source
+### Set up your GitHub Repository
+Fork [xk6-frostfs upstream](https://github.com/TrueCloudLab/xk6-frostfs/fork) source
 repository to your own personal repository. Copy the URL of your fork (you will
 need it for the `git clone` command below).
 ```sh
-$ git clone https://git.frostfs.info/TrueCloudLab/xk6-frostfs
+$ git clone https://github.com/TrueCloudLab/xk6-frostfs
 ```
 ### Set up git remote as ``upstream``
 ```sh
 $ cd xk6-frostfs
-$ git remote add upstream https://git.frostfs.info/TrueCloudLab/xk6-frostfs
+$ git remote add upstream https://github.com/TrueCloudLab/xk6-frostfs
 $ git fetch upstream
 $ git merge upstream/master
 ...
@@ -90,7 +89,7 @@ $ git push origin feature/123-something_awesome
 ```
 ### Create a Pull Request
-Pull requests can be created via git.frostfs.info. Refer to [this
+Pull requests can be created via GitHub. Refer to [this
 document](https://help.github.com/articles/creating-a-pull-request/) for
 detailed steps on how to create a pull request. After a Pull Request gets peer
 reviewed and approved, it will be merged.

Makefile (deleted, 114 lines)

@@ -1,114 +0,0 @@
#!/usr/bin/make -f

# Common variables
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
BINDIR = bin

OUTPUT_LINT_DIR ?= $(abspath $(BINDIR))/linters
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache

# Binaries to build
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
BINS = $(addprefix $(BINDIR)/, $(CMDS))

.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format lint docker/lint pre-commit unpre-commit version clean

# Make all binaries
all: $(BINS)

$(BINS): $(BINDIR) dep
	@echo "⇒ Build $@"
	CGO_ENABLED=0 \
	go build -v -trimpath \
	-ldflags "-X $(REPO)/internal/version.Version=$(VERSION)" \
	-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))

$(BINDIR):
	@echo "⇒ Ensure dir: $@"
	@mkdir -p $@

# Pull go dependencies
dep:
	@printf "⇒ Download requirements: "
	@CGO_ENABLED=0 \
	go mod download && echo OK
	@printf "⇒ Tidy requirements: "
	@CGO_ENABLED=0 \
	go mod tidy -v && echo OK

# Run `make %` in Golang container, for more information run `make help.docker/%`
docker/%:
	$(if $(filter $*,all $(BINS)), \
		@echo "=> Running 'make $*' in clean Docker environment" && \
		docker run --rm -t \
		-v `pwd`:/src \
		-w /src \
		-u `stat -c "%u:%g" .` \
		--env HOME=/src \
		golang:$(GO_VERSION) make $*,\
		@echo "supported docker targets: all $(BINS) lint")

# Run tests
test:
	@go test ./... -cover

# Run tests with race detection and produce coverage output
cover:
	@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
	@go tool cover -html=coverage.txt -o coverage.html

# Reformat code
format:
	@echo "⇒ Processing gofmt check"
	@gofmt -s -w ./

# Run linters
lint:
	@if [ ! -d "$(LINT_DIR)" ]; then \
		make lint-install; \
	fi
	$(LINT_DIR)/golangci-lint run --timeout=5m

# Install linters
lint-install:
	@rm -rf $(OUTPUT_LINT_DIR)
	@mkdir -p $(OUTPUT_LINT_DIR)
	@mkdir -p $(TMP_DIR)
	@rm -rf $(TMP_DIR)/linters
	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
	@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
	@rm -rf $(TMP_DIR)/linters
	@rmdir $(TMP_DIR) 2>/dev/null || true
	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)

# Run linters in Docker
docker/lint:
	docker run --rm -it \
	-v `pwd`:/src \
	-u `stat -c "%u:%g" .` \
	--env HOME=/src \
	golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'

# Activate pre-commit hooks
pre-commit:
	pre-commit install -t pre-commit -t commit-msg

# Deactivate pre-commit hooks
unpre-commit:
	pre-commit uninstall -t pre-commit -t commit-msg

# Show current version
version:
	@echo $(VERSION)

# Clean up files
clean:
	rm -rf .cache
	rm -rf $(BINDIR)

include help.mk


@@ -1,5 +1,5 @@
 <p align="center">
-  <img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
+  <img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
 </p>
 <p align="center">
   <a href="https://go.k6.io/k6">k6</a> extension to test and benchmark FrostFS related protocols.
@@ -47,17 +47,15 @@ Create native client with `connect` method. Arguments:
 - hex encoded private key (empty value produces random key)
 - dial timeout in seconds (0 for the default value)
 - stream timeout in seconds (0 for the default value)
-- generate object header on the client side (for big object - split locally too)
-- max size for generated object header on the client side (for big object - the size that the object is split into)
 ```js
 import native from 'k6/x/frostfs/native';
-const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
+const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0)
 ```
 ### Methods
 - `putContainer(params)`. The `params` is a dictionary (e.g.
-  `{placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
+  `{acl:'public-read-write',placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
   Returns dictionary with `success`
   boolean flag, `container_id` string, and `error` string.
 - `setBufferSize(size)`. Sets internal buffer size for data upload and
@@ -75,13 +73,12 @@ const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
 Create a local client with `connect` method. Arguments:
 - local path to frostfs storage node configuration file
-- local path to frostfs storage node configuration directory
 - hex encoded private key (empty value produces random key)
 - whether to use the debug logger (warning: very verbose)
 ```js
 import local from 'k6/x/frostfs/local';
-const local_client = local.connect("/path/to/config.yaml", "/path/to/config/dir", "", false)
+const local_client = local.connect("/path/to/config.yaml", "", false)
 ```
 ### Methods
@@ -101,13 +98,13 @@ Credentials are taken from default AWS configuration files and ENVs.
 ```js
 import s3 from 'k6/x/frostfs/s3';
-const s3_cli = s3.connect("https://s3.frostfs.devenv:8080")
+const s3_cli = s3.connect("http://s3.frostfs.devenv:8080")
 ```
 You can also provide additional options:
 ```js
 import s3 from 'k6/x/frostfs/s3';
-const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s'})
+const s3_cli = s3.connect("http://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s'})
 ```
 * `no_verify_ssl` - Bool. If `true` - skip verifying the s3 certificate chain and host name (useful if s3 uses self-signed certificates)
@@ -125,7 +122,6 @@ const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 't
 Create local s3 client with `connect` method. Arguments:
 - local path to frostfs storage node configuration file
-- local path to frostfs storage node configuration directory
 - parameter map with the following options:
   * `hex_key`: private key to use as a hexadecimal string. A random one is created if none is provided.
   * `node_position`: position of this node in the node array if loading multiple nodes independently (default: 0).
@@ -138,7 +134,7 @@ Create local s3 client with `connect` method. Arguments:
 import local from 'k6/x/frostfs/local';
 const params = {'node_position': 1, 'node_count': 3}
 const bucketMapping = {'mytestbucket': 'GBQDDUM1hdodXmiRHV57EUkFWJzuntsG8BG15wFSwam6'}
-const local_client = local.connect("/path/to/config.yaml", "/path/to/config/dir", params, bucketMapping)
+const local_client = local.connect("/path/to/config.yaml", params, bucketMapping)
 ```
 ### Methods
@@ -151,60 +147,6 @@ const local_client = local.connect("/path/to/config.yaml", "/path/to/config/dir", params, bucketMapping)
 See native protocol and s3 test suite examples in [examples](./examples) dir.
-# Command line utils
-To build all command line utils just run:
-```shell
-$ make
-```
-All binaries will be in `bin` directory.
-## Export registry db
-You can export registry bolt db to json file, that can be used as pregen for scenarios (see [docs](./scenarios/run_scenarios.md)).
-To do this use `frostfs-xk6-registry-exporter`, available flags can be seen in help:
-```shell
-$ ./bin/frostfs-xk6-registry-exporter -h
-Registry exporter for xk6
-Usage:
-  registry-exporter [flags]
-Examples:
-registry-exporter registry.bolt
-registry-exporter --status created --out out.json registry.bolt
-Flags:
-      --age int         Object age
-      --format string   Output format (default "json")
-  -h, --help            help for registry-exporter
-      --out string      Path to output file (default "dumped-registry.json")
-      --status string   Object status (default "created")
-  -v, --version         version for registry-exporter
-```
-## Import pregen into registry db
-You can import pregenerated json files into registry bolt db. Use `frostfs-xk6-registry import`. Usage examples are in help:
-```shell
-$ ./bin/frostfs-xk6-registry import -h
-Import objects into registry from pregenerated files
-Usage:
-  xk6-registry import [flags]
-Examples:
-xk6-registry import registry.bolt preset.json
-xk6-registry import registry.bolt preset.json another_preset.json
-Flags:
-  -h, --help   help for import
-```
 # License
 - [GNU General Public License v3.0](LICENSE)


@@ -1,18 +0,0 @@
package main

import (
	"context"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	if cmd, err := rootCmd.ExecuteContextC(ctx); err != nil {
		cmd.PrintErrln("Error:", err.Error())
		cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
		os.Exit(1)
	}
}


@@ -1,89 +0,0 @@
package main

import (
	"fmt"
	"runtime"

	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/version"
	"github.com/spf13/cobra"
)

var rootCmd = &cobra.Command{
	Use:     "registry-exporter",
	Version: version.Version,
	Short:   "Registry exporter",
	Long:    "Registry exporter for xk6",
	Example: `registry-exporter registry.bolt
registry-exporter --status created --out out.json registry.bolt`,
	SilenceErrors: true,
	SilenceUsage:  true,
	RunE:          rootCmdRun,
}

const (
	outFlag    = "out"
	formatFlag = "format"
	statusFlag = "status"
	ageFlag    = "age"
)

const (
	defaultOutPath = "dumped-registry.json"
	jsonFormat     = "json"
	createdStatus  = "created"
)

func init() {
	rootCmd.Flags().String(outFlag, defaultOutPath, "Path to output file")
	rootCmd.Flags().String(formatFlag, jsonFormat, "Output format")
	rootCmd.Flags().String(statusFlag, createdStatus, "Object status")
	rootCmd.Flags().Int(ageFlag, 0, "Object age")

	cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
	rootCmd.SetVersionTemplate(`FrostFS xk6 Registry Exporter
{{printf "Version: %s" .Version }}
GoVersion: {{ runtimeVersion }}
`)
}

func rootCmdRun(cmd *cobra.Command, args []string) error {
	if len(args) != 1 {
		return fmt.Errorf("expected exactly one non-flag argument: path to the registry, got: %s", args)
	}

	format, err := cmd.Flags().GetString(formatFlag)
	if err != nil {
		return fmt.Errorf("get '%s' flag: %w", formatFlag, err)
	}
	if format != jsonFormat {
		return fmt.Errorf("unknown format '%s', only '%s' is supported", format, jsonFormat)
	}

	out, err := cmd.Flags().GetString(outFlag)
	if err != nil {
		return fmt.Errorf("get '%s' flag: %w", outFlag, err)
	}

	status, err := cmd.Flags().GetString(statusFlag)
	if err != nil {
		return fmt.Errorf("get '%s' flag: %w", statusFlag, err)
	}

	age, err := cmd.Flags().GetInt(ageFlag)
	if err != nil {
		return fmt.Errorf("get '%s' flag: %w", ageFlag, err)
	}

	objRegistry := registry.NewObjRegistry(cmd.Context(), args[0])
	objSelector := registry.NewObjSelector(objRegistry, 0, registry.SelectorAwaiting, &registry.ObjFilter{
		Status: status,
		Age:    age,
	})
	objExporter := registry.NewObjExporter(objSelector)

	cmd.Println("Writing result file:", out)
	return objExporter.ExportJSONPreGen(out)
}
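For orientation, the removed exporter is a small registry -> selector -> exporter pipeline; a minimal sketch of the same flow outside cobra, using only the types from the file above (paths and filter values are placeholders):

```go
package main

import (
	"context"
	"log"

	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
)

func main() {
	// Same pipeline as rootCmdRun above: open the bolt registry, select
	// matching objects, and dump them as a pregen JSON file.
	objRegistry := registry.NewObjRegistry(context.Background(), "registry.bolt")
	objSelector := registry.NewObjSelector(objRegistry, 0, registry.SelectorAwaiting, &registry.ObjFilter{
		Status: "created", // default of the --status flag
		Age:    0,         // default of the --age flag
	})
	if err := registry.NewObjExporter(objSelector).ExportJSONPreGen("dumped-registry.json"); err != nil {
		log.Fatal(err)
	}
}
```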


@@ -1,55 +0,0 @@
package importer

import (
	"encoding/json"
	"os"

	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
)

type PreGenObj struct {
	Bucket    string `json:"bucket"`
	Object    string `json:"object"`
	Container string `json:"container"`
}

type PreGenerateInfo struct {
	Buckets    []string    `json:"buckets"`
	Containers []string    `json:"containers"`
	Objects    []PreGenObj `json:"objects"`
	ObjSize    string      `json:"obj_size"`
}

// ImportJSONPreGen writes objects from pregenerated JSON file
// to the registry.
// Note that ImportJSONPreGen does not check if object already
// exists in the registry so in case of re-entry the registry
// will have two entities representing the same object.
func ImportJSONPreGen(o *registry.ObjRegistry, filename string) error {
	f, err := os.ReadFile(filename)
	if err != nil {
		return err
	}

	var pregenInfo PreGenerateInfo
	err = json.Unmarshal(f, &pregenInfo)
	if err != nil {
		return err
	}

	// AddObject uses DB.Batch to combine concurrent Batch calls
	// into a single Bolt transaction. DB.Batch is limited by
	// DB.MaxBatchDelay which may affect performance.
	for _, obj := range pregenInfo.Objects {
		if obj.Bucket != "" {
			err = o.AddObject("", "", obj.Bucket, obj.Object, "")
		} else {
			err = o.AddObject(obj.Container, obj.Object, "", "", "")
		}
		if err != nil {
			return err
		}
	}
	return nil
}
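A pregen file that ImportJSONPreGen accepts mirrors the structs above; a sketch that produces one (the bucket, object name, and size are made-up example values):

```go
package main

import (
	"encoding/json"
	"os"

	"git.frostfs.info/TrueCloudLab/xk6-frostfs/cmd/xk6-registry/importer"
)

func main() {
	// Field names follow the JSON tags of PreGenerateInfo/PreGenObj above.
	info := importer.PreGenerateInfo{
		Buckets: []string{"testbucket"},
		Objects: []importer.PreGenObj{
			{Bucket: "testbucket", Object: "obj-0001"},
		},
		ObjSize: "1024",
	}
	b, _ := json.MarshalIndent(info, "", "  ")
	_ = os.WriteFile("preset.json", b, 0o644)
}
```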


@@ -1,27 +0,0 @@
package importer

import (
	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
	"github.com/spf13/cobra"
)

// Cmd represents the import command.
var Cmd = &cobra.Command{
	Use:   "import",
	Short: "Import objects into registry",
	Long:  "Import objects into registry from pregenerated files",
	Example: `xk6-registry import registry.bolt preset.json
xk6-registry import registry.bolt preset.json another_preset.json`,
	RunE: runCmd,
	Args: cobra.MinimumNArgs(2),
}

func runCmd(cmd *cobra.Command, args []string) error {
	objRegistry := registry.NewObjRegistry(cmd.Context(), args[0])
	for i := 1; i < len(args); i++ {
		if err := ImportJSONPreGen(objRegistry, args[i]); err != nil {
			return err
		}
	}
	return nil
}


@@ -1,18 +0,0 @@
package main

import (
	"context"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
	if cmd, err := rootCmd.ExecuteContextC(ctx); err != nil {
		cmd.PrintErrln("Error:", err.Error())
		cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
		os.Exit(1)
	}
}


@@ -1,33 +0,0 @@
package main

import (
	"runtime"

	"git.frostfs.info/TrueCloudLab/xk6-frostfs/cmd/xk6-registry/importer"
	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/version"
	"github.com/spf13/cobra"
)

var rootCmd = &cobra.Command{
	Use:     "xk6-registry",
	Version: version.Version,
	Short:   "Command Line Tool to work with Registry",
	Long: `Registry provides tools to work with object registry for xk6.
It contains command for importing objects in registry from preset`,
	SilenceErrors: true,
	SilenceUsage:  true,
	Run:           rootCmdRun,
}

func init() {
	cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
	rootCmd.SetVersionTemplate(`FrostFS xk6-registry
{{printf "Version: %s" .Version }}
GoVersion: {{ runtimeVersion }}
`)
	rootCmd.AddCommand(importer.Cmd)
}

func rootCmdRun(cmd *cobra.Command, _ []string) {
	_ = cmd.Usage()
}


@@ -2,7 +2,7 @@ import local from 'k6/x/frostfs/local';
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
 const payload = open('../go.sum', 'b');
-const local_cli = local.connect("/path/to/config.yaml", "/path/to/config/dir", "", false)
+const local_cli = local.connect("/path/to/config.yaml", "", false)
 export const options = {
   stages: [


@@ -3,16 +3,17 @@ import { fail } from "k6";
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
 const payload = open('../go.sum', 'b');
-const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false, 0)
+const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0)
 export const options = {
   stages: [
-    { duration: '30s', target: 10 },
+    {duration: '30s', target: 10},
   ],
 };
 export function setup() {
   const params = {
+    acl: 'public-read-write',
     placement_policy: 'REP 3',
     name: 'container-name',
     name_global_scope: 'false'
@@ -23,7 +24,7 @@ export function setup() {
     fail(res.error)
   }
   console.info("created container", res.container_id)
-  return { container_id: res.container_id }
+  return {container_id: res.container_id}
 }
 export default function (data) {


@@ -3,7 +3,7 @@ import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
 const payload = open('../go.sum', 'b');
 const container = "AjSxSNNXbJUDPqqKYm1VbFVDGCakbpUNH8aGjPmGAH3B"
-const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
+const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0)
 const frostfs_obj = frostfs_cli.onsite(container, payload)
 export const options = {


@@ -3,7 +3,7 @@ import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
 const bucket = "testbucket"
 const payload = open('../go.sum', 'b');
-const s3local_cli = s3local.connect("path/to/storage/config.yml", "path/to/storage/config/dir", {}, {
+const s3local_cli = s3local.connect("path/to/storage/config.yml", {}, {
   'testbucket': 'GBQDDUM1hdodXmiRHV57EUkFWJzuntsG8BG15wFSwam6',
 });

go.mod (174 lines changed)

@@ -1,132 +1,122 @@
 module git.frostfs.info/TrueCloudLab/xk6-frostfs
-go 1.22
+go 1.19
 require (
-	git.frostfs.info/TrueCloudLab/frostfs-node v0.38.3-0.20240502170333-ec2873caa7c6
+	git.frostfs.info/TrueCloudLab/frostfs-node v0.22.2-0.20230522084814-731bf5d0ee66
-	git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.29.0-rc.1.0.20240422122918-034396d554ec
+	git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.24.1-0.20230403110435-01afa1cae425
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240502080121-12ddefe07877
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230519144724-f5b23eb22569
 	git.frostfs.info/TrueCloudLab/tzhash v1.8.0
-	github.com/aws/aws-sdk-go-v2 v1.19.0
+	github.com/aws/aws-sdk-go-v2 v1.16.3
-	github.com/aws/aws-sdk-go-v2/config v1.18.28
+	github.com/aws/aws-sdk-go-v2/config v1.15.5
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.72
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.37.0
+	github.com/dop251/goja v0.0.0-20230427124612-428fc442ff5f
-	github.com/dop251/goja v0.0.0-20230626124041-ba8a63e79201
 	github.com/go-loremipsum/loremipsum v1.1.3
-	github.com/google/uuid v1.6.0
+	github.com/google/uuid v1.3.0
 	github.com/joho/godotenv v1.5.1
-	github.com/nspcc-dev/neo-go v0.105.1
+	github.com/nspcc-dev/neo-go v0.101.1
-	github.com/panjf2000/ants/v2 v2.9.0
+	github.com/panjf2000/ants/v2 v2.5.0
-	github.com/sirupsen/logrus v1.9.3
+	github.com/sirupsen/logrus v1.9.2
-	github.com/spf13/cobra v1.8.0
-	github.com/stretchr/testify v1.8.4
+	github.com/stretchr/testify v1.8.3
-	go.etcd.io/bbolt v1.3.8
+	go.etcd.io/bbolt v1.3.6
-	go.k6.io/k6 v0.45.1
+	go.k6.io/k6 v0.44.2-0.20230524054758-add1a5fe5019
-	go.uber.org/zap v1.26.0
+	go.uber.org/zap v1.24.0
-	golang.org/x/sys v0.18.0
+	golang.org/x/sys v0.8.0
 )
 require (
-	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240427200446-67c6f305b21f // indirect
+	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230519114017-0c67b8fefa41 // indirect
-	git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240409115729-6eb492025bdd // indirect
 	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
-	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 // indirect
-	git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
+	git.frostfs.info/TrueCloudLab/hrw v1.2.0 // indirect
-	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240416071728-04a79f57ef1f // indirect
 	git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
-	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 // indirect
+	github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 // indirect
-	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
-	github.com/aws/aws-sdk-go v1.44.296 // indirect
+	github.com/aws/aws-sdk-go v1.44.6 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.13.27 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.12.0 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.36 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.27 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.30 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.29 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.12.13 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.19.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 // indirect
-	github.com/aws/smithy-go v1.13.5 // indirect
+	github.com/aws/smithy-go v1.11.2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bluele/gcache v0.0.2 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
 	github.com/dlclark/regexp2 v1.10.0 // indirect
 	github.com/fatih/color v1.15.0 // indirect
-	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
-	github.com/go-chi/chi/v5 v5.0.8 // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/logr v1.2.4 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-pkgz/expirable-cache/v3 v3.0.0 // indirect
 	github.com/go-sourcemap/sourcemap v2.1.4-0.20211119122758-180fcef48034+incompatible // indirect
-	github.com/golang/snappy v0.0.4 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
+	github.com/google/pprof v0.0.0-20230510103437-eeec1cb781c3 // indirect
+	github.com/gorilla/mux v1.8.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect
-	github.com/hashicorp/golang-lru v1.0.2 // indirect
+	github.com/hashicorp/golang-lru v0.6.0 // indirect
-	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
-	github.com/klauspost/compress v1.17.4 // indirect
+	github.com/klauspost/compress v1.16.5 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+	github.com/minio/highwayhash v1.0.2 // indirect
-	github.com/minio/sio v0.3.1 // indirect
+	github.com/minio/sio v0.3.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
 	github.com/mstoykov/atlas v0.0.0-20220811071828-388f114305dd // indirect
+	github.com/nats-io/jwt/v2 v2.4.1 // indirect
-	github.com/nats-io/nats.go v1.32.0 // indirect
+	github.com/nats-io/nats.go v1.25.0 // indirect
-	github.com/nats-io/nkeys v0.4.7 // indirect
+	github.com/nats-io/nkeys v0.4.4 // indirect
 	github.com/nats-io/nuid v1.0.1 // indirect
-	github.com/nspcc-dev/go-ordered-json v0.0.0-20240112074137-296698a162ae // indirect
 	github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
+	github.com/onsi/ginkgo v1.16.5 // indirect
 	github.com/onsi/gomega v1.20.2 // indirect
-	github.com/pelletier/go-toml/v2 v2.1.1 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.7 // indirect
-	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.18.0 // indirect
+	github.com/prometheus/client_golang v1.15.1 // indirect
-	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/client_model v0.4.0 // indirect
-	github.com/prometheus/common v0.46.0 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/prometheus/procfs v0.10.0 // indirect
-	github.com/sagikazarmark/locafero v0.4.0 // indirect
-	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e // indirect
-	github.com/sourcegraph/conc v0.3.0 // indirect
+	github.com/spaolacci/murmur3 v1.1.0 // indirect
-	github.com/spf13/afero v1.11.0 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
-	github.com/spf13/cast v1.6.0 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.18.2 // indirect
+	github.com/spf13/viper v1.15.0 // indirect
-	github.com/ssgreg/journald v1.0.0 // indirect
-	github.com/subosito/gotenv v1.6.0 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
-	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
-	github.com/twmb/murmur3 v1.1.8 // indirect
-	go.opentelemetry.io/otel v1.22.0 // indirect
+	go.opentelemetry.io/otel v1.15.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.15.1 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.15.1 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.15.1 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1 // indirect
-	go.opentelemetry.io/otel/metric v1.22.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.22.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.15.1 // indirect
-	go.opentelemetry.io/otel/trace v1.22.0 // indirect
+	go.opentelemetry.io/otel/trace v1.15.1 // indirect
-	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
+	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
+	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.21.0 // indirect
+	golang.org/x/crypto v0.9.0 // indirect
-	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
+	golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
-	golang.org/x/net v0.23.0 // indirect
+	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sync v0.6.0 // indirect
+	golang.org/x/sync v0.2.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
-	golang.org/x/time v0.5.0 // indirect
+	golang.org/x/time v0.3.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
+	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
-	google.golang.org/grpc v1.63.2 // indirect
+	google.golang.org/grpc v1.55.0 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
 	gopkg.in/guregu/null.v3 v3.5.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect

go.sum (825 lines changed; diff suppressed because it is too large)

help.mk (deleted, 22 lines)

@@ -1,22 +0,0 @@
.PHONY: help

# Show this help prompt
help:
	@echo '  Usage:'
	@echo ''
	@echo '    make <target>'
	@echo ''
	@echo '  Targets:'
	@echo ''
	@awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9.%_/-]+ ?:/{ print "  ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq

# Show help for docker/% IGNORE
help.docker/%:
	$(eval TARGETS:=$(notdir all lint) ${BINS})
	@echo '  Usage:'
	@echo ''
	@echo '    make docker/% -- Run `make %` in Golang container'
	@echo ''
	@echo '  Supported docker targets:'
	@echo ''
	@$(foreach bin, $(TARGETS), echo '   ' $(bin);)


@@ -38,7 +38,7 @@ func (d *Datagen) Exports() modules.Exports {
 	return modules.Exports{Default: d}
 }
-func (d *Datagen) Generator(size int, typ string, streaming bool) *Generator {
-	g := NewGenerator(d.vu, size, strings.ToLower(typ), streaming)
+func (d *Datagen) Generator(size int, typ string) *Generator {
+	g := NewGenerator(d.vu, size, strings.ToLower(typ))
 	return &g
 }


@@ -2,10 +2,11 @@ package datagen
 import (
 	"bytes"
+	"crypto/sha256"
+	"encoding/hex"
 	"math/rand"
-	"sync/atomic"
-	"time"
+	"github.com/dop251/goja"
 	"github.com/go-loremipsum/loremipsum"
 	"go.k6.io/k6/js/modules"
 )
@@ -26,9 +27,11 @@ type (
 		buf    []byte
 		typ    string
 		offset int
-		streaming bool
-		seed      *atomic.Int64
+	}
+	GenPayloadResponse struct {
+		Payload goja.ArrayBuffer
+		Hash    string
 	}
 )
@@ -41,7 +44,7 @@ var payloadTypes = []string{
 	"",
 }
-func NewGenerator(vu modules.VU, size int, typ string, streaming bool) Generator {
+func NewGenerator(vu modules.VU, size int, typ string) Generator {
 	if size <= 0 {
 		panic("size should be positive")
 	}
@@ -50,57 +53,46 @@ func NewGenerator(vu modules.VU, size int, typ string, streaming bool) Generator
 	for i := range payloadTypes {
 		if payloadTypes[i] == typ {
 			found = true
-			break
 		}
 	}
 	if !found {
 		vu.InitEnv().Logger.Info("Unknown payload type '%s', random will be used.", typ)
 	}
-	g := Generator{
-		vu:   vu,
-		size: size,
-		typ:  typ,
-	}
-	if streaming {
-		g.streaming = true
-		g.seed = new(atomic.Int64)
-	} else {
-		g.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
-		g.buf = make([]byte, size+TailSize)
-		g.fillBuffer()
-	}
-	return g
+	return Generator{vu: vu, size: size, buf: nil, typ: typ, offset: 0}
 }
-func (g *Generator) fillBuffer() {
+func (g *Generator) GenPayload(calcHash bool) GenPayloadResponse {
+	data := g.nextSlice()
+	dataHash := ""
+	if calcHash {
+		hashBytes := sha256.Sum256(data)
+		dataHash = hex.EncodeToString(hashBytes[:])
+	}
+	payload := g.vu.Runtime().NewArrayBuffer(data)
+	return GenPayloadResponse{Payload: payload, Hash: dataHash}
+}
+func (g *Generator) nextSlice() []byte {
+	if g.buf == nil {
+		// Allocate buffer with extra tail for sliding and populate it with random bytes
+		g.buf = make([]byte, g.size+TailSize)
+		rand.Read(g.buf) // Per docs, err is always nil here
 	switch g.typ {
 	case "text":
 		li := loremipsum.New()
-		b := bytes.NewBuffer(g.buf[:0])
+		b := bytes.NewBuffer(nil)
 		for b.Len() < g.size+TailSize {
 			b.WriteString(li.Paragraph())
 			b.WriteRune('\n')
 		}
 		g.buf = b.Bytes()
 	default:
-		g.rand.Read(g.buf) // Per docs, err is always nil here
+		// Allocate buffer with extra tail for sliding and populate it with random bytes
+		g.buf = make([]byte, g.size+TailSize)
+		rand.Read(g.buf) // Per docs, err is always nil here
 	}
 }
-func (g *Generator) GenPayload() Payload {
-	if g.streaming {
-		return NewStreamPayload(g.size, g.seed.Add(1), g.typ)
-	}
-	data := g.nextSlice()
-	return NewFixedPayload(data)
-}
-func (g *Generator) nextSlice() []byte {
-	if g.offset+g.size >= len(g.buf) {
-		g.offset = 0
-		g.fillBuffer()
-	}
 	result := g.buf[g.offset : g.offset+g.size]
@@ -108,5 +100,10 @@ func (g *Generator) nextSlice() []byte {
 	// Shift the offset for the next call. If we've used our entire tail, then erase
 	// the buffer so that on the next call it is regenerated anew
 	g.offset += 1
+	if g.offset+g.size >= len(g.buf) {
+		g.buf = nil
+		g.offset = 0
+	}
 	return result
 }
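Taken together, the right-hand side reduces payload generation to a sliding buffer plus an optional hash. A minimal sketch of driving it, assuming a modules.VU obtained from k6's test helpers (as the tests below do):

```go
g := NewGenerator(vu, 1024, "text") // 1 KiB lorem-ipsum slices
resp := g.GenPayload(true)          // calcHash=true fills Hash with hex-encoded SHA-256
_ = resp.Payload                    // goja.ArrayBuffer, ready to hand to the JS runtime
_ = resp.Hash
```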


@@ -16,25 +16,25 @@ func TestGenerator(t *testing.T) {
 	t.Run("fails on negative size", func(t *testing.T) {
 		require.Panics(t, func() {
-			_ = NewGenerator(vu, -1, "", false)
+			_ = NewGenerator(vu, -1, "")
 		})
 	})
 	t.Run("fails on zero size", func(t *testing.T) {
 		require.Panics(t, func() {
-			_ = NewGenerator(vu, 0, "", false)
+			_ = NewGenerator(vu, 0, "")
 		})
 	})
 	t.Run("creates slice of specified size", func(t *testing.T) {
 		size := 10
-		g := NewGenerator(vu, size, "", false)
+		g := NewGenerator(vu, size, "")
 		slice := g.nextSlice()
 		require.Len(t, slice, size)
 	})
 	t.Run("creates a different slice on each call", func(t *testing.T) {
-		g := NewGenerator(vu, 1000, "", false)
+		g := NewGenerator(vu, 1000, "")
 		slice1 := g.nextSlice()
 		slice2 := g.nextSlice()
 		// Each slice should be unique (assuming that 1000 random bytes will never coincide
@@ -43,7 +43,7 @@ func TestGenerator(t *testing.T) {
 	})
 	t.Run("keeps generating slices after consuming entire tail", func(t *testing.T) {
-		g := NewGenerator(vu, 1000, "", false)
+		g := NewGenerator(vu, 1000, "")
 		initialSlice := g.nextSlice()
 		for i := 0; i < TailSize; i++ {
 			g.nextSlice()


@@ -1,121 +0,0 @@
package datagen

import (
	"bufio"
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"hash"
	"io"
	"math/rand"

	"github.com/go-loremipsum/loremipsum"
)

// Payload represents arbitrary data to be packed into S3 or native object.
// Implementations could be thread-unsafe.
type Payload interface {
	// Reader returns io.Reader instance to read the payload.
	// Must not be called twice.
	Reader() io.Reader
	// Bytes is a helper which reads all data from Reader() into slice.
	// The sole purpose of this method is to simplify HTTP scenario,
	// where all payload needs to be read and wrapped.
	Bytes() []byte
	// Size returns payload size, which is equal to the total amount of data
	// that could be read from the Reader().
	Size() int
	// Hash returns payload sha256 hash. Must be called after all data is read from the reader.
	Hash() string
}

type bytesPayload struct {
	data []byte
}

func (p *bytesPayload) Reader() io.Reader {
	return bytes.NewReader(p.data)
}

func (p *bytesPayload) Size() int {
	return len(p.data)
}

func (p *bytesPayload) Hash() string {
	h := sha256.Sum256(p.data[:])
	return hex.EncodeToString(h[:])
}

func (p *bytesPayload) Bytes() []byte {
	return p.data
}

func NewFixedPayload(data []byte) Payload {
	return &bytesPayload{data: data}
}

type randomPayload struct {
	r    io.Reader
	s    hash.Hash
	h    string
	size int
}

func NewStreamPayload(size int, seed int64, typ string) Payload {
	var rr io.Reader
	switch typ {
	case "text":
		rr = &textReader{li: loremipsum.NewWithSeed(seed)}
	default:
		rr = rand.New(rand.NewSource(seed))
	}

	lr := io.LimitReader(rr, int64(size))
	// We need some buffering to write complete blocks in the TeeReader.
	// Streaming payload read is expected to be used for big objects, thus 4k seems like a good choice.
	br := bufio.NewReaderSize(lr, 4096)
	s := sha256.New()
	tr := io.TeeReader(br, s)
	return &randomPayload{
		r:    tr,
		s:    s,
		size: size,
	}
}

func (p *randomPayload) Reader() io.Reader {
	return p.r
}

func (p *randomPayload) Size() int {
	return p.size
}

func (p *randomPayload) Hash() string {
	if p.h == "" {
		p.h = hex.EncodeToString(p.s.Sum(nil))
		// Prevent possible misuse.
		p.r = nil
		p.s = nil
	}
	return p.h
}

func (p *randomPayload) Bytes() []byte {
	data, err := io.ReadAll(p.r)
	if err != nil {
		// We use only 2 readers, either `bytes.Reader` or `rand.Reader`.
		// None of them returns errors, thus encountering an error is a fatal error.
		panic(err)
	}
	return data
}

type textReader struct {
	li *loremipsum.LoremIpsum
}

func (r *textReader) Read(p []byte) (n int, err error) {
	paragraph := r.li.Paragraph()
	return copy(p, paragraph), nil
}
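The streaming half of this removed Payload interface is meant to be read exactly once, with the hash taken afterwards; schematically (size and seed are arbitrary example values, and the usual io import is assumed):

```go
p := NewStreamPayload(8*1024*1024, 1, "text") // 8 MiB lorem-ipsum stream
if _, err := io.Copy(io.Discard, p.Reader()); err != nil {
	panic(err) // per the comment above, these readers never fail
}
hash := p.Hash() // only valid after the reader has been fully drained
_ = hash
```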


@@ -1,40 +0,0 @@
package datagen

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"io"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestFixedPayload(t *testing.T) {
	const size = 123
	data := make([]byte, size)
	_, err := rand.Read(data)
	require.NoError(t, err)

	p := NewFixedPayload(data)
	require.Equal(t, size, p.Size())

	actual, err := io.ReadAll(p.Reader())
	require.NoError(t, err)
	require.Equal(t, data, actual)

	h := sha256.Sum256(data)
	require.Equal(t, hex.EncodeToString(h[:]), p.Hash())
}

func TestStreamingPayload(t *testing.T) {
	const size = 123
	p := NewStreamPayload(size, 0, "")
	require.Equal(t, size, p.Size())

	actual, err := io.ReadAll(p.Reader())
	require.NoError(t, err)
	require.Equal(t, size, len(actual))
	require.Equal(t, sha256.Size*2, len(p.Hash()))
}


@@ -5,15 +5,14 @@ import (
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
 	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
+	"github.com/dop251/goja"
 	"go.k6.io/k6/js/modules"
 )
 type Client struct {
 	vu modules.VU
 	rc *rawclient.RawClient
-	l  Limiter
 }
 type (
@@ -26,21 +25,13 @@ type (
 		Success  bool
 		ObjectID string
 		Error    string
-		Abort    bool
 	}
 	GetResponse    SuccessOrErrorResponse
 	DeleteResponse SuccessOrErrorResponse
 )
-func (c *Client) Put(containerID string, headers map[string]string, payload datagen.Payload) PutResponse {
-	if c.l.IsFull() {
-		return PutResponse{
-			Success: false,
-			Error:   "engine size limit reached",
-			Abort:   true,
-		}
-	}
+func (c *Client) Put(containerID string, headers map[string]string, payload goja.ArrayBuffer) PutResponse {
 	id, err := c.rc.Put(c.vu.Context(), mustParseContainerID(containerID), nil, headers, payload.Bytes())
 	if err != nil {
 		return PutResponse{Error: err.Error()}

@@ -1,105 +0,0 @@
package local

import (
	"sync/atomic"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)

var (
	_ Limiter = &noopLimiter{}
	_ Limiter = &sizeLimiter{}
)

type Limiter interface {
	engine.MetricRegister
	IsFull() bool
}

func NewLimiter(maxSizeGB int64) Limiter {
	if maxSizeGB < 0 {
		panic("max size is negative")
	}
	if maxSizeGB == 0 {
		return &noopLimiter{}
	}
	return &sizeLimiter{
		maxSize:     maxSizeGB * 1024 * 1024 * 1024,
		currentSize: &atomic.Int64{},
	}
}

type sizeLimiter struct {
	maxSize     int64
	currentSize *atomic.Int64
}

func (*sizeLimiter) AddMethodDuration(method string, d time.Duration)                {}
func (*sizeLimiter) AddToContainerSize(cnrID string, size int64)                     {}
func (*sizeLimiter) AddToObjectCounter(shardID string, objectType string, delta int) {}
func (*sizeLimiter) ClearErrorCounter(shardID string)                                {}
func (*sizeLimiter) DeleteShardMetrics(shardID string)                               {}
func (*sizeLimiter) GC() metrics.GCMetrics                                           { return &noopGCMetrics{} }
func (*sizeLimiter) IncErrorCounter(shardID string)                                  {}
func (*sizeLimiter) SetMode(shardID string, mode mode.Mode)                          {}
func (*sizeLimiter) SetObjectCounter(shardID string, objectType string, v uint64)    {}
func (*sizeLimiter) WriteCache() metrics.WriteCacheMetrics                           { return &noopWriteCacheMetrics{} }
func (*sizeLimiter) DeleteContainerSize(cnrID string)                                {}
func (*sizeLimiter) DeleteContainerCount(cnrID string)                               {}
func (*sizeLimiter) SetContainerObjectCounter(_, _, _ string, _ uint64)              {}
func (*sizeLimiter) IncContainerObjectCounter(_, _, _ string)                        {}
func (*sizeLimiter) SubContainerObjectCounter(_, _, _ string, _ uint64)              {}
func (*sizeLimiter) IncRefillObjectsCount(_, _ string, _ int, _ bool)                {}
func (*sizeLimiter) SetRefillPercent(_, _ string, _ uint32)                          {}
func (*sizeLimiter) SetRefillStatus(_, _, _ string)                                  {}

func (sl *sizeLimiter) AddToPayloadCounter(shardID string, size int64) {
	sl.currentSize.Add(size)
}

func (sl *sizeLimiter) IsFull() bool {
	cur := sl.currentSize.Load()
	return cur > sl.maxSize
}

type noopLimiter struct{}

func (*noopLimiter) AddMethodDuration(method string, d time.Duration)                {}
func (*noopLimiter) AddToContainerSize(cnrID string, size int64)                     {}
func (*noopLimiter) AddToObjectCounter(shardID string, objectType string, delta int) {}
func (*noopLimiter) AddToPayloadCounter(shardID string, size int64)                  {}
func (*noopLimiter) ClearErrorCounter(shardID string)                                {}
func (*noopLimiter) DeleteShardMetrics(shardID string)                               {}
func (*noopLimiter) GC() metrics.GCMetrics                                           { return &noopGCMetrics{} }
func (*noopLimiter) IncErrorCounter(shardID string)                                  {}
func (*noopLimiter) SetMode(shardID string, mode mode.Mode)                          {}
func (*noopLimiter) SetObjectCounter(shardID string, objectType string, v uint64)    {}
func (*noopLimiter) WriteCache() metrics.WriteCacheMetrics                           { return &noopWriteCacheMetrics{} }
func (*noopLimiter) IsFull() bool                                                    { return false }
func (*noopLimiter) DeleteContainerSize(cnrID string)                                {}
func (*noopLimiter) DeleteContainerCount(cnrID string)                               {}
func (*noopLimiter) SetContainerObjectCounter(_, _, _ string, _ uint64)              {}
func (*noopLimiter) IncContainerObjectCounter(_, _, _ string)                        {}
func (*noopLimiter) SubContainerObjectCounter(_, _, _ string, _ uint64)              {}
func (*noopLimiter) IncRefillObjectsCount(_, _ string, _ int, _ bool)                {}
func (*noopLimiter) SetRefillPercent(_, _ string, _ uint32)                          {}
func (*noopLimiter) SetRefillStatus(_, _, _ string)                                  {}

type noopGCMetrics struct{}

func (*noopGCMetrics) AddDeletedCount(shardID string, deleted uint64, failed uint64)          {}
func (*noopGCMetrics) AddExpiredObjectCollectionDuration(string, time.Duration, bool, string) {}
func (*noopGCMetrics) AddInhumedObjectCount(shardID string, count uint64, objectType string)  {}
func (*noopGCMetrics) AddRunDuration(shardID string, d time.Duration, success bool)           {}

type noopWriteCacheMetrics struct{}

func (*noopWriteCacheMetrics) AddMethodDuration(_, _, _, _ string, _ bool, _ time.Duration) {}
func (*noopWriteCacheMetrics) Close(_, _ string)                                            {}
func (*noopWriteCacheMetrics) IncOperationCounter(_, _, _, _ string, _ metrics.NullBool)    {}
func (*noopWriteCacheMetrics) SetActualCount(_, _, _ string, count uint64)                  {}
func (*noopWriteCacheMetrics) SetEstimateSize(_, _, _ string, _ uint64)                     {}
func (*noopWriteCacheMetrics) SetMode(shardID string, mode string)                          {}
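The removed Limiter piggybacks on the engine's metrics callbacks to count stored payload bytes; a sketch of its lifecycle (the 8 GiB cap is an arbitrary example value):

```go
l := NewLimiter(8) // 0 would return the no-op limiter; negative values panic
// The storage engine reports writes through the engine.MetricRegister interface:
l.AddToPayloadCounter("shard-1", 4096)
if l.IsFull() {
	// Total payload exceeded the cap: local.Client.Put starts returning Abort (see client.go above).
}
```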


@@ -40,18 +40,15 @@ type RootModule struct {
 	mu sync.Mutex
 	// configFile is the name of the configuration file used during one test.
 	configFile string
-	// configDir is the name of the configuration directory used during one test.
-	configDir string
 	// ng is the engine instance used during one test, corresponding to the configFile. Each VU
 	// gets the same engine instance.
 	ng *engine.StorageEngine
-	l  Limiter
 }
 // Local represents an instance of the module for every VU.
 type Local struct {
 	vu modules.VU
-	ResolveEngine func(context.Context, string, string, bool, int64) (*engine.StorageEngine, Limiter, error)
+	ResolveEngine func(context.Context, string, bool) (*engine.StorageEngine, error)
 }
 // Ensure the interfaces are implemented correctly.
@@ -59,9 +56,9 @@ var (
 	_ modules.Module   = &RootModule{}
 	_ modules.Instance = &Local{}
-	objPutSuccess, objPutFails, objPutDuration, objPutData *metrics.Metric
-	objGetSuccess, objGetFails, objGetDuration, objGetData *metrics.Metric
-	objDeleteSuccess, objDeleteFails, objDeleteDuration    *metrics.Metric
+	objPutTotal, objPutFails, objPutDuration          *metrics.Metric
+	objGetTotal, objGetFails, objGetDuration          *metrics.Metric
+	objDeleteTotal, objDeleteFails, objDeleteDuration *metrics.Metric
 )
 func init() {
@@ -74,7 +71,7 @@ func (r *RootModule) NewModuleInstance(vu modules.VU) modules.Instance {
 	return NewLocalModuleInstance(vu, r.GetOrCreateEngine)
 }
-func NewLocalModuleInstance(vu modules.VU, resolveEngine func(context.Context, string, string, bool, int64) (*engine.StorageEngine, Limiter, error)) *Local {
+func NewLocalModuleInstance(vu modules.VU, resolveEngine func(context.Context, string, bool) (*engine.StorageEngine, error)) *Local {
 	return &Local{
 		vu:            vu,
 		ResolveEngine: resolveEngine,
@@ -103,53 +100,45 @@ func checkResourceLimits() error {
 	return nil
 }
-// GetOrCreateEngine returns the current engine instance for the given configuration file or directory,
+// GetOrCreateEngine returns the current engine instance for the given configuration file,
 // creating a new one if none exists. Note that the identity of configuration files is their
 // file name for the purposes of test runs.
-func (r *RootModule) GetOrCreateEngine(ctx context.Context, configFile string, configDir string, debug bool, maxSizeGB int64) (*engine.StorageEngine, Limiter, error) {
+func (r *RootModule) GetOrCreateEngine(ctx context.Context, configFile string, debug bool) (*engine.StorageEngine, error) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
-	if len(configFile) == 0 && len(configDir) == 0 {
-		return nil, nil, errors.New("provide configFile or configDir")
+	if len(configFile) == 0 {
+		return nil, errors.New("configFile cannot be empty")
 	}
-	if r.l == nil {
-		r.l = NewLimiter(maxSizeGB)
-	}
 	// Create and initialize engine for the given configFile if it doesn't exist already
 	if r.ng == nil {
 		r.configFile = configFile
-		r.configDir = configDir
-		appCfg := config.New(configFile, configDir, "")
-		ngOpts, shardOpts, err := storageEngineOptionsFromConfig(ctx, appCfg, debug, r.l)
+		appCfg := config.New(configFile, "", "")
+		ngOpts, shardOpts, err := storageEngineOptionsFromConfig(appCfg, debug)
 		if err != nil {
-			return nil, nil, fmt.Errorf("creating engine options from config: %v", err)
+			return nil, fmt.Errorf("creating engine options from config: %v", err)
 		}
 		if err := checkResourceLimits(); err != nil {
-			return nil, nil, err
+			return nil, err
 		}
 		r.ng = engine.New(ngOpts...)
 		for i, opts := range shardOpts {
-			if _, err := r.ng.AddShard(ctx, opts...); err != nil {
-				return nil, nil, fmt.Errorf("adding shard %d: %v", i, err)
+			if _, err := r.ng.AddShard(opts...); err != nil {
+				return nil, fmt.Errorf("adding shard %d: %v", i, err)
 			}
 		}
-		if err := r.ng.Open(ctx); err != nil {
-			return nil, nil, fmt.Errorf("opening engine: %v", err)
+		if err := r.ng.Open(); err != nil {
+			return nil, fmt.Errorf("opening engine: %v", err)
 		}
 		if err := r.ng.Init(ctx); err != nil {
-			return nil, nil, fmt.Errorf("initializing engine: %v", err)
+			return nil, fmt.Errorf("initializing engine: %v", err)
 		}
 	} else if configFile != r.configFile {
-		return nil, nil, fmt.Errorf("GetOrCreateEngine called with mismatching configFile after engine was "+
-			"initialized: got %q, want %q", configFile, r.configFile)
-	} else if configDir != r.configDir {
-		return nil, nil, fmt.Errorf("GetOrCreateEngine called with mismatching configDir after engine was "+
-			"initialized: got %q, want %q", configDir, r.configDir)
+		return nil, fmt.Errorf("GetOrCreateEngine called with mismatching configFile after engine was initialized: got %q, want %q", configFile, r.configFile)
 	}
-	return r.ng, r.l, nil
+	return r.ng, nil
 }
 // Exports implements the modules.Instance interface and returns the exports
@@ -160,10 +149,10 @@ func (s *Local) Exports() modules.Exports {
 func (s *Local) VU() modules.VU { return s.vu }
-func (s *Local) Connect(configFile, configDir, hexKey string, debug bool, maxSizeGB int64) (*Client, error) {
-	ng, l, err := s.ResolveEngine(s.VU().Context(), configFile, configDir, debug, maxSizeGB)
+func (s *Local) Connect(configFile, hexKey string, debug bool) (*Client, error) {
+	ng, err := s.ResolveEngine(s.VU().Context(), configFile, debug)
 	if err != nil {
-		return nil, fmt.Errorf("connecting to engine for config - file %q dir %q: %v", configFile, configDir, err)
+		return nil, fmt.Errorf("connecting to engine for config %q: %v", configFile, err)
 	}
 	key, err := ParseOrCreateKey(hexKey)
@@ -172,19 +161,18 @@ func (s *Local) Connect(configFile, configDir, hexKey string, debug bool, maxSiz
 	}
 	// Register metrics.
-	objPutSuccess, _ = stats.Registry.NewMetric("local_obj_put_success", metrics.Counter)
-	objPutFails, _ = stats.Registry.NewMetric("local_obj_put_fails", metrics.Counter)
-	objPutDuration, _ = stats.Registry.NewMetric("local_obj_put_duration", metrics.Trend, metrics.Time)
-	objPutData, _ = stats.Registry.NewMetric("local_obj_put_bytes", metrics.Counter, metrics.Data)
+	registry := metrics.NewRegistry()
+	objPutTotal, _ = registry.NewMetric("local_obj_put_total", metrics.Counter)
+	objPutFails, _ = registry.NewMetric("local_obj_put_fails", metrics.Counter)
+	objPutDuration, _ = registry.NewMetric("local_obj_put_duration", metrics.Trend, metrics.Time)
objGetSuccess, _ = stats.Registry.NewMetric("local_obj_get_success", metrics.Counter) objGetTotal, _ = registry.NewMetric("local_obj_get_total", metrics.Counter)
objGetFails, _ = stats.Registry.NewMetric("local_obj_get_fails", metrics.Counter) objGetFails, _ = registry.NewMetric("local_obj_get_fails", metrics.Counter)
objGetDuration, _ = stats.Registry.NewMetric("local_obj_get_duration", metrics.Trend, metrics.Time) objGetDuration, _ = registry.NewMetric("local_obj_get_duration", metrics.Trend, metrics.Time)
objGetData, _ = stats.Registry.NewMetric("local_obj_get_bytes", metrics.Counter, metrics.Data)
objDeleteSuccess, _ = stats.Registry.NewMetric("local_obj_delete_success", metrics.Counter) objDeleteTotal, _ = registry.NewMetric("local_obj_delete_total", metrics.Counter)
objDeleteFails, _ = stats.Registry.NewMetric("local_obj_delete_fails", metrics.Counter) objDeleteFails, _ = registry.NewMetric("local_obj_delete_fails", metrics.Counter)
objDeleteDuration, _ = stats.Registry.NewMetric("local_obj_delete_duration", metrics.Trend, metrics.Time) objDeleteDuration, _ = registry.NewMetric("local_obj_delete_duration", metrics.Trend, metrics.Time)
// Create raw client backed by local storage engine. // Create raw client backed by local storage engine.
rc := rawclient.New(ng, rc := rawclient.New(ng,
@ -193,32 +181,30 @@ func (s *Local) Connect(configFile, configDir, hexKey string, debug bool, maxSiz
if err != nil { if err != nil {
stats.Report(s.vu, objPutFails, 1) stats.Report(s.vu, objPutFails, 1)
} else { } else {
stats.Report(s.vu, objPutSuccess, 1) stats.Report(s.vu, objPutTotal, 1)
stats.ReportDataSent(s.vu, float64(sz)) stats.ReportDataSent(s.vu, float64(sz))
stats.Report(s.vu, objPutDuration, metrics.D(dt)) stats.Report(s.vu, objPutDuration, metrics.D(dt))
stats.Report(s.vu, objPutData, float64(sz))
} }
}), }),
rawclient.WithGetHandler(func(sz uint64, err error, dt time.Duration) { rawclient.WithGetHandler(func(sz uint64, err error, dt time.Duration) {
if err != nil { if err != nil {
stats.Report(s.vu, objGetFails, 1) stats.Report(s.vu, objGetFails, 1)
} else { } else {
stats.Report(s.vu, objGetSuccess, 1) stats.Report(s.vu, objGetTotal, 1)
stats.Report(s.vu, objGetDuration, metrics.D(dt)) stats.Report(s.vu, objGetDuration, metrics.D(dt))
stats.ReportDataReceived(s.vu, float64(sz)) stats.ReportDataReceived(s.vu, float64(sz))
stats.Report(s.vu, objGetData, float64(sz))
} }
}), }),
rawclient.WithDeleteHandler(func(err error, dt time.Duration) { rawclient.WithDeleteHandler(func(err error, dt time.Duration) {
if err != nil { if err != nil {
stats.Report(s.vu, objDeleteFails, 1) stats.Report(s.vu, objDeleteFails, 1)
} else { } else {
stats.Report(s.vu, objDeleteSuccess, 1) stats.Report(s.vu, objDeleteTotal, 1)
stats.Report(s.vu, objDeleteDuration, metrics.D(dt)) stats.Report(s.vu, objDeleteDuration, metrics.D(dt))
} }
}), }),
) )
return &Client{vu: s.vu, rc: rc, l: l}, nil return &Client{vu: s.vu, rc: rc}, nil
} }
type epochState struct{} type epochState struct{}
@ -231,7 +217,7 @@ func (epochState) CurrentEpoch() uint64 { return 0 }
// preloaded the storage (if any), by using the same configuration file. // preloaded the storage (if any), by using the same configuration file.
// //
// Note that the configuration file only needs to contain the storage-specific sections. // Note that the configuration file only needs to contain the storage-specific sections.
func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug bool, l Limiter) ([]engine.Option, [][]shard.Option, error) { func storageEngineOptionsFromConfig(c *config.Config, debug bool) ([]engine.Option, [][]shard.Option, error) {
log := zap.L() log := zap.L()
if debug { if debug {
var err error var err error
@ -245,12 +231,11 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)), engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
engine.WithShardPoolSize(engineconfig.ShardPoolSize(c)), engine.WithShardPoolSize(engineconfig.ShardPoolSize(c)),
engine.WithLogger(&logger.Logger{Logger: log}), engine.WithLogger(&logger.Logger{Logger: log}),
engine.WithMetrics(l),
} }
var shOpts [][]shard.Option var shOpts [][]shard.Option
err := engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
opts := []shard.Option{ opts := []shard.Option{
shard.WithRefillMetabase(sc.RefillMetabase()), shard.WithRefillMetabase(sc.RefillMetabase()),
shard.WithMode(sc.Mode()), shard.WithMode(sc.Mode()),
@ -266,7 +251,6 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
cfg := blobovniczaconfig.From((*config.Config)(scfg)) cfg := blobovniczaconfig.From((*config.Config)(scfg))
ss := blobstor.SubStorage{ ss := blobstor.SubStorage{
Storage: blobovniczatree.NewBlobovniczaTree( Storage: blobovniczatree.NewBlobovniczaTree(
ctx,
blobovniczatree.WithRootPath(scfg.Path()), blobovniczatree.WithRootPath(scfg.Path()),
blobovniczatree.WithPermissions(scfg.Perm()), blobovniczatree.WithPermissions(scfg.Perm()),
blobovniczatree.WithBlobovniczaSize(cfg.Size()), blobovniczatree.WithBlobovniczaSize(cfg.Size()),
@ -308,22 +292,17 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
// write cache // write cache
if wc := sc.WriteCache(); wc.Enabled() { if wc := sc.WriteCache(); wc.Enabled() {
opts = append(opts, opts = append(opts, shard.WithWriteCacheOptions(
shard.WithWriteCache(true),
shard.WithWriteCacheOptions(
[]writecache.Option{
writecache.WithPath(wc.Path()), writecache.WithPath(wc.Path()),
writecache.WithMaxBatchSize(wc.BoltDB().MaxBatchSize()), writecache.WithMaxBatchSize(wc.BoltDB().MaxBatchSize()),
writecache.WithMaxBatchDelay(wc.BoltDB().MaxBatchDelay()), writecache.WithMaxBatchDelay(wc.BoltDB().MaxBatchDelay()),
writecache.WithMaxObjectSize(wc.MaxObjectSize()), writecache.WithMaxObjectSize(wc.MaxObjectSize()),
writecache.WithSmallObjectSize(wc.SmallObjectSize()), writecache.WithSmallObjectSize(wc.SmallObjectSize()),
writecache.WithFlushWorkersCount(wc.WorkerCount()), writecache.WithFlushWorkersCount(wc.WorkersNumber()),
writecache.WithMaxCacheSize(wc.SizeLimit()), writecache.WithMaxCacheSize(wc.SizeLimit()),
writecache.WithNoSync(wc.NoSync()), writecache.WithNoSync(wc.NoSync()),
writecache.WithLogger(&logger.Logger{Logger: log}), writecache.WithLogger(&logger.Logger{Logger: log}),
}, ))
),
)
} }
// tree // tree
@ -375,9 +354,7 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
return nil return nil
}) })
if err != nil {
return nil, nil, fmt.Errorf("iterate shards: %w", err)
}
return ngOpts, shOpts, nil return ngOpts, shOpts, nil
} }

View file

@ -52,7 +52,7 @@ func (c *RawClient) Put(ctx context.Context, containerID cid.ID, ownerID *user.I
obj := object.New() obj := object.New()
obj.SetContainerID(containerID) obj.SetContainerID(containerID)
obj.SetOwnerID(*ownerID) obj.SetOwnerID(ownerID)
obj.SetAttributes(attrs...) obj.SetAttributes(attrs...)
obj.SetPayload(payload) obj.SetPayload(payload)
obj.SetPayloadSize(uint64(sz)) obj.SetPayloadSize(uint64(sz))

View file

@ -1,58 +0,0 @@
package native
import (
"context"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
const networkCacheTTL = time.Minute
var networkInfoCache = &networkInfoCacheT{}
type networkInfoCacheT struct {
guard sync.RWMutex
current *netmap.NetworkInfo
fetchTS time.Time
}
func (c *networkInfoCacheT) getOrFetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
if v := c.get(); v != nil {
return v, nil
}
return c.fetch(ctx, cli)
}
func (c *networkInfoCacheT) get() *netmap.NetworkInfo {
c.guard.RLock()
defer c.guard.RUnlock()
if c.current == nil || time.Since(c.fetchTS) > networkCacheTTL {
return nil
}
return c.current
}
func (c *networkInfoCacheT) fetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
c.guard.Lock()
defer c.guard.Unlock()
if time.Since(c.fetchTS) <= networkCacheTTL {
return c.current, nil
}
res, err := cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
v := res.Info()
c.current = &v
c.fetchTS = time.Now()
return c.current, nil
}
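
The deleted cache above is a double-checked TTL cache: an RLock fast path, then a write-locked re-check so that concurrent expirations trigger only a single NetworkInfo fetch. The same pattern reduced to the standard library (a sketch with a string payload standing in for netmap.NetworkInfo):

package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlCache is a generic stand-in for networkInfoCacheT above.
type ttlCache struct {
	guard   sync.RWMutex
	value   string
	fetchTS time.Time
	ttl     time.Duration
}

func (c *ttlCache) getOrFetch(fetch func() string) string {
	// Fast path: shared lock while the cached value is still fresh.
	c.guard.RLock()
	if !c.fetchTS.IsZero() && time.Since(c.fetchTS) <= c.ttl {
		v := c.value
		c.guard.RUnlock()
		return v
	}
	c.guard.RUnlock()

	// Slow path: exclusive lock plus a re-check, so that when many
	// goroutines see an expired entry only the first one refetches.
	c.guard.Lock()
	defer c.guard.Unlock()
	if !c.fetchTS.IsZero() && time.Since(c.fetchTS) <= c.ttl {
		return c.value
	}
	c.value = fetch()
	c.fetchTS = time.Now()
	return c.value
}

func main() {
	c := &ttlCache{ttl: time.Minute}
	fmt.Println(c.getOrFetch(func() string { return "network info" }))
}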

View file

@ -1,6 +1,7 @@
package native package native
import ( import (
"bytes"
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/sha256" "crypto/sha256"
@ -13,6 +14,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -21,8 +23,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz" "git.frostfs.info/TrueCloudLab/tzhash/tz"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats" "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/dop251/goja"
"go.k6.io/k6/js/modules" "go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics" "go.k6.io/k6/metrics"
) )
@ -33,8 +35,7 @@ type (
key ecdsa.PrivateKey key ecdsa.PrivateKey
tok session.Object tok session.Object
cli *client.Client cli *client.Client
prepareLocally bool bufsize int
maxObjSize uint64
} }
PutResponse struct { PutResponse struct {
@ -68,16 +69,27 @@ type (
vu modules.VU vu modules.VU
key ecdsa.PrivateKey key ecdsa.PrivateKey
cli *client.Client cli *client.Client
bufsize int
hdr object.Object hdr object.Object
payload []byte payload []byte
prepareLocally bool
maxObjSize uint64
} }
) )
const defaultBufferSize = 64 * 1024 const defaultBufferSize = 64 * 1024
func (c *Client) Put(containerID string, headers map[string]string, payload datagen.Payload, chunkSize int) PutResponse { func (c *Client) SetBufferSize(size int) {
if size < 0 {
panic("buffer size must be positive")
}
if size == 0 {
c.bufsize = defaultBufferSize
} else {
c.bufsize = size
}
}
func (c *Client) Put(containerID string, headers map[string]string, payload goja.ArrayBuffer) PutResponse {
cliContainerID := parseContainerID(containerID) cliContainerID := parseContainerID(containerID)
tok := c.tok tok := c.tok
@ -101,10 +113,10 @@ func (c *Client) Put(containerID string, headers map[string]string, payload data
var o object.Object var o object.Object
o.SetContainerID(cliContainerID) o.SetContainerID(cliContainerID)
o.SetOwnerID(owner) o.SetOwnerID(&owner)
o.SetAttributes(attrs...) o.SetAttributes(attrs...)
resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize, c.maxObjSize) resp, err := put(c.vu, c.bufsize, c.cli, &tok, &o, payload.Bytes())
if err != nil { if err != nil {
return PutResponse{Success: false, Error: err.Error()} return PutResponse{Success: false, Error: err.Error()}
} }
@ -128,9 +140,9 @@ func (c *Client) Delete(containerID string, objectID string) DeleteResponse {
start := time.Now() start := time.Now()
var prm client.PrmObjectDelete var prm client.PrmObjectDelete
prm.ObjectID = &cliObjectID prm.ByID(cliObjectID)
prm.ContainerID = &cliContainerID prm.FromContainer(cliContainerID)
prm.Session = &tok prm.WithinSession(tok)
_, err = c.cli.ObjectDelete(c.vu.Context(), prm) _, err = c.cli.ObjectDelete(c.vu.Context(), prm)
if err != nil { if err != nil {
@ -138,7 +150,7 @@ func (c *Client) Delete(containerID string, objectID string) DeleteResponse {
return DeleteResponse{Success: false, Error: err.Error()} return DeleteResponse{Success: false, Error: err.Error()}
} }
stats.Report(c.vu, objDeleteSuccess, 1) stats.Report(c.vu, objDeleteTotal, 1)
stats.Report(c.vu, objDeleteDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objDeleteDuration, metrics.D(time.Since(start)))
return DeleteResponse{Success: true} return DeleteResponse{Success: true}
} }
@ -159,12 +171,12 @@ func (c *Client) Get(containerID, objectID string) GetResponse {
start := time.Now() start := time.Now()
var prm client.PrmObjectGet var prm client.PrmObjectGet
prm.ObjectID = &cliObjectID prm.ByID(cliObjectID)
prm.ContainerID = &cliContainerID prm.FromContainer(cliContainerID)
prm.Session = &tok prm.WithinSession(tok)
objSize := 0 var objSize = 0
err = get(c.cli, prm, c.vu.Context(), func(data []byte) { err = get(c.cli, prm, c.vu.Context(), c.bufsize, func(data []byte) {
objSize += len(data) objSize += len(data)
}) })
if err != nil { if err != nil {
@ -172,10 +184,9 @@ func (c *Client) Get(containerID, objectID string) GetResponse {
return GetResponse{Success: false, Error: err.Error()} return GetResponse{Success: false, Error: err.Error()}
} }
stats.Report(c.vu, objGetSuccess, 1) stats.Report(c.vu, objGetTotal, 1)
stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start)))
stats.ReportDataReceived(c.vu, float64(objSize)) stats.ReportDataReceived(c.vu, float64(objSize))
stats.Report(c.vu, objGetData, float64(objSize))
return GetResponse{Success: true} return GetResponse{Success: true}
} }
@ -183,9 +194,10 @@ func get(
cli *client.Client, cli *client.Client,
prm client.PrmObjectGet, prm client.PrmObjectGet,
ctx context.Context, ctx context.Context,
bufSize int,
onDataChunk func(chunk []byte), onDataChunk func(chunk []byte),
) error { ) error {
buf := make([]byte, defaultBufferSize) var buf = make([]byte, bufSize)
objectReader, err := cli.ObjectGetInit(ctx, prm) objectReader, err := cli.ObjectGetInit(ctx, prm)
if err != nil { if err != nil {
@ -228,12 +240,12 @@ func (c *Client) VerifyHash(containerID, objectID, expectedHash string) VerifyHa
} }
var prm client.PrmObjectGet var prm client.PrmObjectGet
prm.ObjectID = &cliObjectID prm.ByID(cliObjectID)
prm.ContainerID = &cliContainerID prm.FromContainer(cliContainerID)
prm.Session = &tok prm.WithinSession(tok)
hasher := sha256.New() hasher := sha256.New()
err = get(c.cli, prm, c.vu.Context(), func(data []byte) { err = get(c.cli, prm, c.vu.Context(), c.bufsize, func(data []byte) {
hasher.Write(data) hasher.Write(data)
}) })
if err != nil { if err != nil {
@ -241,7 +253,7 @@ func (c *Client) VerifyHash(containerID, objectID, expectedHash string) VerifyHa
} }
actualHash := hex.EncodeToString(hasher.Sum(nil)) actualHash := hex.EncodeToString(hasher.Sum(nil))
if actualHash != expectedHash { if actualHash != expectedHash {
return VerifyHashResponse{Success: false, Error: "hash mismatch"} return VerifyHashResponse{Success: true, Error: "hash mismatch"}
} }
return VerifyHashResponse{Success: true} return VerifyHashResponse{Success: true}
@ -264,6 +276,16 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
container.SetCreationTime(&cnr, time.Now()) container.SetCreationTime(&cnr, time.Now())
cnr.SetOwner(usr) cnr.SetOwner(usr)
if basicACLStr, ok := params["acl"]; ok {
var basicACL acl.Basic
err := basicACL.DecodeString(basicACLStr)
if err != nil {
return c.putCnrErrorResponse(err)
}
cnr.SetBasicACL(basicACL)
}
placementPolicyStr, ok := params["placement_policy"] placementPolicyStr, ok := params["placement_policy"]
if ok { if ok {
var placementPolicy netmap.PlacementPolicy var placementPolicy netmap.PlacementPolicy
@ -300,9 +322,10 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
} }
start := time.Now() start := time.Now()
res, err := c.cli.ContainerPut(c.vu.Context(), client.PrmContainerPut{ var prm client.PrmContainerPut
Container: &cnr, prm.SetContainer(cnr)
})
res, err := c.cli.ContainerPut(c.vu.Context(), prm)
if err != nil { if err != nil {
return c.putCnrErrorResponse(err) return c.putCnrErrorResponse(err)
} }
@ -318,7 +341,7 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
return PutContainerResponse{Success: true, ContainerID: res.ID().EncodeToString()} return PutContainerResponse{Success: true, ContainerID: res.ID().EncodeToString()}
} }
func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObject { func (c *Client) Onsite(containerID string, payload goja.ArrayBuffer) PreparedObject {
maxObjectSize, epoch, hhDisabled, err := parseNetworkInfo(c.vu.Context(), c.cli) maxObjectSize, epoch, hhDisabled, err := parseNetworkInfo(c.vu.Context(), c.cli)
if err != nil { if err != nil {
panic(err) panic(err)
@ -345,7 +368,7 @@ func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObj
obj.SetVersion(&apiVersion) obj.SetVersion(&apiVersion)
obj.SetType(object.TypeRegular) obj.SetType(object.TypeRegular)
obj.SetContainerID(cliContainerID) obj.SetContainerID(cliContainerID)
obj.SetOwnerID(owner) obj.SetOwnerID(&owner)
obj.SetPayloadSize(uint64(ln)) obj.SetPayloadSize(uint64(ln))
obj.SetCreationEpoch(epoch) obj.SetCreationEpoch(epoch)
@ -361,10 +384,10 @@ func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObj
vu: c.vu, vu: c.vu,
key: c.key, key: c.key,
cli: c.cli, cli: c.cli,
bufsize: c.bufsize,
hdr: *obj, hdr: *obj,
payload: data, payload: data,
prepareLocally: c.prepareLocally,
maxObjSize: c.maxObjSize,
} }
} }
@ -390,7 +413,7 @@ func (p PreparedObject) Put(headers map[string]string) PutResponse {
return PutResponse{Success: false, Error: err.Error()} return PutResponse{Success: false, Error: err.Error()}
} }
_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0, p.maxObjSize) _, err = put(p.vu, p.bufsize, p.cli, nil, &obj, p.payload)
if err != nil { if err != nil {
return PutResponse{Success: false, Error: err.Error()} return PutResponse{Success: false, Error: err.Error()}
} }
@ -398,44 +421,18 @@ func (p PreparedObject) Put(headers map[string]string) PutResponse {
return PutResponse{Success: true, ObjectID: id.String()} return PutResponse{Success: true, ObjectID: id.String()}
} }
type epochSource uint64 func put(vu modules.VU, bufSize int, cli *client.Client, tok *session.Object,
hdr *object.Object, payload []byte) (*client.ResObjectPut, error) {
func (s epochSource) CurrentEpoch() uint64 {
return uint64(s)
}
func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Object,
hdr *object.Object, payload datagen.Payload, chunkSize int, maxObjSize uint64,
) (*client.ResObjectPut, error) {
bufSize := defaultBufferSize
if chunkSize > 0 {
bufSize = chunkSize
}
buf := make([]byte, bufSize) buf := make([]byte, bufSize)
rdr := payload.Reader() rdr := bytes.NewReader(payload)
sz := payload.Size() sz := rdr.Size()
// starting upload // starting upload
start := time.Now() start := time.Now()
var prm client.PrmObjectPutInit var prm client.PrmObjectPutInit
if tok != nil { if tok != nil {
prm.Session = tok prm.WithinSession(*tok)
}
if chunkSize > 0 {
prm.MaxChunkLength = chunkSize
}
if prepareLocally {
ni, err := networkInfoCache.getOrFetch(vu.Context(), cli)
if err != nil {
return nil, err
}
prm.MaxSize = ni.MaxObjectSize()
prm.EpochSource = epochSource(ni.CurrentEpoch())
prm.WithoutHomomorphHash = true
if maxObjSize > 0 {
prm.MaxSize = maxObjSize
}
} }
objectWriter, err := cli.ObjectPutInit(vu.Context(), prm) objectWriter, err := cli.ObjectPutInit(vu.Context(), prm)
@ -444,30 +441,29 @@ func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Ob
return nil, err return nil, err
} }
if !objectWriter.WriteHeader(vu.Context(), *hdr) { if !objectWriter.WriteHeader(*hdr) {
stats.Report(vu, objPutFails, 1) stats.Report(vu, objPutFails, 1)
_, err = objectWriter.Close(vu.Context()) _, err = objectWriter.Close()
return nil, err return nil, err
} }
n, _ := rdr.Read(buf) n, _ := rdr.Read(buf)
for n > 0 { for n > 0 {
if !objectWriter.WritePayloadChunk(vu.Context(), buf[:n]) { if !objectWriter.WritePayloadChunk(buf[:n]) {
break break
} }
n, _ = rdr.Read(buf) n, _ = rdr.Read(buf)
} }
resp, err := objectWriter.Close(vu.Context()) resp, err := objectWriter.Close()
if err != nil { if err != nil {
stats.Report(vu, objPutFails, 1) stats.Report(vu, objPutFails, 1)
return nil, err return nil, err
} }
stats.Report(vu, objPutSuccess, 1) stats.Report(vu, objPutTotal, 1)
stats.ReportDataSent(vu, float64(sz)) stats.ReportDataSent(vu, float64(sz))
stats.Report(vu, objPutDuration, metrics.D(time.Since(start))) stats.Report(vu, objPutDuration, metrics.D(time.Since(start)))
stats.Report(vu, objPutData, float64(sz))
return resp, nil return resp, nil
} }
@ -495,9 +491,10 @@ func (x *waitParams) setDefaults() {
func (c *Client) waitForContainerPresence(ctx context.Context, cnrID cid.ID, wp *waitParams) error { func (c *Client) waitForContainerPresence(ctx context.Context, cnrID cid.ID, wp *waitParams) error {
return waitFor(ctx, wp, func(ctx context.Context) bool { return waitFor(ctx, wp, func(ctx context.Context) bool {
_, err := c.cli.ContainerGet(ctx, client.PrmContainerGet{ var prm client.PrmContainerGet
ContainerID: &cnrID, prm.SetContainer(cnrID)
})
_, err := c.cli.ContainerGet(ctx, prm)
return err == nil return err == nil
}) })
} }
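
The put helper shown in this file streams the payload to the object writer in fixed-size chunks instead of buffering it whole. The read loop, isolated as a standard-library sketch (io.Writer stands in for the SDK's payload writer; unlike put, this version also surfaces read errors):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// copyInChunks mirrors the loop in put(): fill a fixed buffer, forward
// each chunk to the writer, stop cleanly at EOF.
func copyInChunks(dst io.Writer, src io.Reader, chunkSize int) (int, error) {
	buf := make([]byte, chunkSize)
	total := 0
	for {
		n, err := src.Read(buf)
		if n > 0 {
			if _, werr := dst.Write(buf[:n]); werr != nil {
				return total, werr
			}
			total += n
		}
		if err == io.EOF {
			return total, nil
		}
		if err != nil {
			return total, err
		}
	}
}

func main() {
	var out bytes.Buffer
	n, _ := copyInChunks(&out, strings.NewReader("payload bytes"), 4)
	fmt.Println(n, out.String()) // 13 payload bytes
}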

View file

@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.k6.io/k6/js/modules" "go.k6.io/k6/js/modules"
@ -29,9 +28,9 @@ var (
_ modules.Instance = &Native{} _ modules.Instance = &Native{}
_ modules.Module = &RootModule{} _ modules.Module = &RootModule{}
objPutSuccess, objPutFails, objPutDuration, objPutData *metrics.Metric objPutTotal, objPutFails, objPutDuration *metrics.Metric
objGetSuccess, objGetFails, objGetDuration, objGetData *metrics.Metric objGetTotal, objGetFails, objGetDuration *metrics.Metric
objDeleteSuccess, objDeleteFails, objDeleteDuration *metrics.Metric objDeleteTotal, objDeleteFails, objDeleteDuration *metrics.Metric
cnrPutTotal, cnrPutFails, cnrPutDuration *metrics.Metric cnrPutTotal, cnrPutFails, cnrPutDuration *metrics.Metric
) )
@ -52,17 +51,13 @@ func (n *Native) Exports() modules.Exports {
return modules.Exports{Default: n} return modules.Exports{Default: n}
} }
func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool, maxObjSize int) (*Client, error) { func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int) (*Client, error) {
var ( var (
cli client.Client cli client.Client
pk *keys.PrivateKey pk *keys.PrivateKey
err error err error
) )
if maxObjSize < 0 {
return nil, fmt.Errorf("max object size value must be positive")
}
pk, err = keys.NewPrivateKey() pk, err = keys.NewPrivateKey()
if len(hexPrivateKey) != 0 { if len(hexPrivateKey) != 0 {
pk, err = keys.NewPrivateKeyFromHex(hexPrivateKey) pk, err = keys.NewPrivateKeyFromHex(hexPrivateKey)
@ -72,18 +67,19 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
} }
var prmInit client.PrmInit var prmInit client.PrmInit
prmInit.Key = pk.PrivateKey prmInit.ResolveFrostFSFailures()
prmInit.SetDefaultPrivateKey(pk.PrivateKey)
cli.Init(prmInit) cli.Init(prmInit)
var prmDial client.PrmDial var prmDial client.PrmDial
prmDial.Endpoint = endpoint prmDial.SetServerURI(endpoint)
if dialTimeout > 0 { if dialTimeout > 0 {
prmDial.DialTimeout = time.Duration(dialTimeout) * time.Second prmDial.SetTimeout(time.Duration(dialTimeout) * time.Second)
} }
if streamTimeout > 0 { if streamTimeout > 0 {
prmDial.StreamTimeout = time.Duration(streamTimeout) * time.Second prmDial.SetStreamTimeout(time.Duration(streamTimeout) * time.Second)
} }
err = cli.Dial(n.vu.Context(), prmDial) err = cli.Dial(n.vu.Context(), prmDial)
@ -93,9 +89,9 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
// generate session token // generate session token
exp := uint64(math.MaxUint64) exp := uint64(math.MaxUint64)
sessionResp, err := cli.SessionCreate(n.vu.Context(), client.PrmSessionCreate{ var prmSessionCreate client.PrmSessionCreate
Expiration: exp, prmSessionCreate.SetExp(exp)
}) sessionResp, err := cli.SessionCreate(n.vu.Context(), prmSessionCreate)
if err != nil { if err != nil {
return nil, fmt.Errorf("dial endpoint: %s %w", endpoint, err) return nil, fmt.Errorf("dial endpoint: %s %w", endpoint, err)
} }
@ -118,42 +114,29 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
tok.SetAuthKey(&key) tok.SetAuthKey(&key)
tok.SetExp(exp) tok.SetExp(exp)
if prepareLocally && maxObjSize > 0 {
res, err := cli.NetworkInfo(n.vu.Context(), client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
if uint64(maxObjSize) > res.Info().MaxObjectSize() {
return nil, fmt.Errorf("max object size must be not greater than %d bytes", res.Info().MaxObjectSize())
}
}
// register metrics // register metrics
registry := metrics.NewRegistry()
objPutTotal, _ = registry.NewMetric("frostfs_obj_put_total", metrics.Counter)
objPutFails, _ = registry.NewMetric("frostfs_obj_put_fails", metrics.Counter)
objPutDuration, _ = registry.NewMetric("frostfs_obj_put_duration", metrics.Trend, metrics.Time)
objPutSuccess, _ = stats.Registry.NewMetric("frostfs_obj_put_success", metrics.Counter) objGetTotal, _ = registry.NewMetric("frostfs_obj_get_total", metrics.Counter)
objPutFails, _ = stats.Registry.NewMetric("frostfs_obj_put_fails", metrics.Counter) objGetFails, _ = registry.NewMetric("frostfs_obj_get_fails", metrics.Counter)
objPutDuration, _ = stats.Registry.NewMetric("frostfs_obj_put_duration", metrics.Trend, metrics.Time) objGetDuration, _ = registry.NewMetric("frostfs_obj_get_duration", metrics.Trend, metrics.Time)
objPutData, _ = stats.Registry.NewMetric("frostfs_obj_put_bytes", metrics.Counter, metrics.Data)
objGetSuccess, _ = stats.Registry.NewMetric("frostfs_obj_get_success", metrics.Counter) objDeleteTotal, _ = registry.NewMetric("frostfs_obj_delete_total", metrics.Counter)
objGetFails, _ = stats.Registry.NewMetric("frostfs_obj_get_fails", metrics.Counter) objDeleteFails, _ = registry.NewMetric("frostfs_obj_delete_fails", metrics.Counter)
objGetDuration, _ = stats.Registry.NewMetric("frostfs_obj_get_duration", metrics.Trend, metrics.Time) objDeleteDuration, _ = registry.NewMetric("frostfs_obj_delete_duration", metrics.Trend, metrics.Time)
objGetData, _ = stats.Registry.NewMetric("frostfs_obj_get_bytes", metrics.Counter, metrics.Data)
objDeleteSuccess, _ = stats.Registry.NewMetric("frostfs_obj_delete_success", metrics.Counter) cnrPutTotal, _ = registry.NewMetric("frostfs_cnr_put_total", metrics.Counter)
objDeleteFails, _ = stats.Registry.NewMetric("frostfs_obj_delete_fails", metrics.Counter) cnrPutFails, _ = registry.NewMetric("frostfs_cnr_put_fails", metrics.Counter)
objDeleteDuration, _ = stats.Registry.NewMetric("frostfs_obj_delete_duration", metrics.Trend, metrics.Time) cnrPutDuration, _ = registry.NewMetric("frostfs_cnr_put_duration", metrics.Trend, metrics.Time)
cnrPutTotal, _ = stats.Registry.NewMetric("frostfs_cnr_put_total", metrics.Counter)
cnrPutFails, _ = stats.Registry.NewMetric("frostfs_cnr_put_fails", metrics.Counter)
cnrPutDuration, _ = stats.Registry.NewMetric("frostfs_cnr_put_duration", metrics.Trend, metrics.Time)
return &Client{ return &Client{
vu: n.vu, vu: n.vu,
key: pk.PrivateKey, key: pk.PrivateKey,
tok: tok, tok: tok,
cli: &cli, cli: &cli,
prepareLocally: prepareLocally, bufsize: defaultBufferSize,
maxObjSize: uint64(maxObjSize),
}, nil }, nil
} }

View file

@ -1,124 +0,0 @@
package registry
import (
"fmt"
"os"
)
type ObjExporter struct {
selector *ObjSelector
}
type PreGenerateInfo struct {
Buckets []string `json:"buckets"`
Containers []string `json:"containers"`
Objects []ObjInfo `json:"objects"`
ObjSize string `json:"obj_size"`
}
type ObjInfo struct {
Bucket string `json:"bucket"`
Object string `json:"object"`
CID string `json:"cid"`
OID string `json:"oid"`
}
func NewObjExporter(selector *ObjSelector) *ObjExporter {
return &ObjExporter{selector: selector}
}
func (o *ObjExporter) ExportJSONPreGen(fileName string) error {
f, err := os.Create(fileName)
if err != nil {
return err
}
defer f.Close()
// there can be a lot of objects, so form the JSON manually
if _, err = f.WriteString(`{"objects":[`); err != nil {
return err
}
bucketMap := make(map[string]struct{})
containerMap := make(map[string]struct{})
count, err := o.selector.Count()
if err != nil {
return err
}
var comma string
for i := 0; i < count; i++ {
info := o.selector.NextObject()
if info == nil {
break
}
if err = writeObjectInfo(comma, info, f); err != nil {
return err
}
if i == 0 {
comma = ","
}
if info.S3Bucket != "" {
bucketMap[info.S3Bucket] = struct{}{}
}
if info.CID != "" {
containerMap[info.CID] = struct{}{}
}
}
if _, err = f.WriteString(`]`); err != nil {
return err
}
if len(bucketMap) > 0 {
if err = writeContainerInfo("buckets", bucketMap, f); err != nil {
return err
}
}
if len(containerMap) > 0 {
if err = writeContainerInfo("containers", containerMap, f); err != nil {
return err
}
}
if _, err = f.WriteString(`}`); err != nil {
return err
}
return nil
}
func writeObjectInfo(comma string, info *ObjectInfo, f *os.File) (err error) {
var res string
if info.S3Bucket != "" || info.S3Key != "" {
res = fmt.Sprintf(`%s{"bucket":"%s","object":"%s"}`, comma, info.S3Bucket, info.S3Key)
} else {
res = fmt.Sprintf(`%s{"cid":"%s","oid":"%s"}`, comma, info.CID, info.OID)
}
_, err = f.WriteString(res)
return err
}
func writeContainerInfo(attrName string, bucketMap map[string]struct{}, f *os.File) (err error) {
if _, err = f.WriteString(fmt.Sprintf(`,"%s":[`, attrName)); err != nil {
return err
}
i := 0
comma := ""
for bucket := range bucketMap {
if _, err = f.WriteString(fmt.Sprintf(`%s"%s"`, comma, bucket)); err != nil {
return err
}
if i == 0 {
comma = ","
}
i++
}
_, err = f.WriteString(`]`)
return err
}
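
For context on the removed exporter: ExportJSONPreGen assembles the JSON by hand because the object set may be too large for a single json.Marshal. A self-contained sketch of the comma-tracking technique it relies on (illustrative types; per-element json.Marshal keeps string escaping correct):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type objInfo struct {
	CID string `json:"cid"`
	OID string `json:"oid"`
}

// writeObjectsJSON streams `{"objects":[...]}` element by element,
// emitting a comma before every element except the first.
func writeObjectsJSON(f *os.File, next func() *objInfo) error {
	if _, err := f.WriteString(`{"objects":[`); err != nil {
		return err
	}
	comma := ""
	for info := next(); info != nil; info = next() {
		raw, err := json.Marshal(info)
		if err != nil {
			return err
		}
		if _, err := f.WriteString(comma + string(raw)); err != nil {
			return err
		}
		comma = ","
	}
	_, err := f.WriteString(`]}`)
	return err
}

func main() {
	objs := []objInfo{{CID: "c1", OID: "o1"}, {CID: "c2", OID: "o2"}}
	i := 0
	next := func() *objInfo {
		if i == len(objs) {
			return nil
		}
		o := &objs[i]
		i++
		return o
	}
	if err := writeObjectsJSON(os.Stdout, next); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}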

View file

@ -1,156 +0,0 @@
package registry
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"os"
"path/filepath"
"slices"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
type expectedResult struct {
mode string
objects []ObjectInfo
dir string
dbName string
jsonName string
}
func TestObjectExporter(t *testing.T) {
names := []string{"s3", "grpc"}
for _, name := range names {
t.Run(name, runExportTest)
t.Run(name+"-changed", runExportChangedTest)
t.Run(name+"-empty", runExportEmptyTest)
}
}
func runExportTest(t *testing.T) {
expected := getExpectedResult(t)
objReg := getFilledRegistry(t, expected)
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(expected.objects, expected.jsonName))
}
func runExportChangedTest(t *testing.T) {
expected := getExpectedResult(t)
objReg := getFilledRegistry(t, expected)
newStatus := randString(10)
num := randPositiveInt(1, len(expected.objects))
changedObjects := make([]ObjectInfo, num)
require.Equal(t, num, copy(changedObjects[:], expected.objects[:]))
sel := NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated})
for i := range changedObjects {
changedObjects[i].Status = newStatus
require.NoError(t, objReg.SetObjectStatus(sel.NextObject().Id, statusCreated, newStatus))
}
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: newStatus}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(changedObjects, expected.jsonName))
}
func runExportEmptyTest(t *testing.T) {
expected := getExpectedResult(t)
expected.objects = make([]ObjectInfo, 0)
objReg := getFilledRegistry(t, expected)
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(expected.objects, expected.jsonName))
}
func getExpectedResult(t *testing.T) expectedResult {
num := randPositiveInt(2, 100)
mode := getMode(t.Name())
require.NotEqual(t, "", mode, "test mode should contain either \"s3\" or\"grpc\"")
dir := t.TempDir()
res := expectedResult{
mode: mode,
objects: generateObjectInfo(num, t.Name()),
dir: dir,
dbName: filepath.Join(dir, "registry-"+mode+".db"),
jsonName: filepath.Join(dir, "registry-"+mode+".json"),
}
return res
}
func randPositiveInt(min, max int) int {
return rand.Intn(max-min) + min
}
func getMode(name string) (res string) {
if strings.Contains(name, "s3") {
res = filepath.Base(name)
}
if strings.Contains(name, "grpc") {
res = filepath.Base(name)
}
return res
}
func generateObjectInfo(num int, mode string) []ObjectInfo {
res := make([]ObjectInfo, num)
for i := range res {
res[i] = randomObjectInfo()
if !strings.Contains(mode, "s3") {
res[i].S3Bucket = ""
res[i].S3Key = ""
}
if !strings.Contains(mode, "grpc") {
res[i].CID = ""
res[i].OID = ""
}
}
return res
}
func getFilledRegistry(t *testing.T, expected expectedResult) *ObjRegistry {
objReg := NewObjRegistry(context.Background(), expected.dbName)
for i := range expected.objects {
require.NoError(t, objReg.AddObject(expected.objects[i].CID, expected.objects[i].OID, expected.objects[i].S3Bucket, expected.objects[i].S3Key, expected.objects[i].PayloadHash))
}
return objReg
}
func checkExported(expected []ObjectInfo, fileName string) error {
file, err := os.ReadFile(fileName)
if err != nil {
return err
}
if !json.Valid(file) {
return fmt.Errorf("exported json file %s is invalid", fileName)
}
var actual PreGenerateInfo
if err := json.Unmarshal(file, &actual); err != nil {
return err
}
if len(expected) != len(actual.Objects) {
return fmt.Errorf("expected len(): %v, got len(): %v", len(expected), len(actual.Objects))
}
for i := range expected {
if !slices.ContainsFunc(actual.Objects, func(oi ObjInfo) bool {
compareS3 := oi.Bucket == expected[i].S3Bucket && oi.Object == expected[i].S3Key
comparegRPC := oi.CID == expected[i].CID && oi.OID == expected[i].OID
return compareS3 && comparegRPC
}) {
return fmt.Errorf("object %v not found in exported json file %s", expected[i], fileName)
}
}
return nil
}

View file

@ -101,7 +101,7 @@ func randomObjectInfo() ObjectInfo {
func randString(n int) string { func randString(n int) string {
var sb strings.Builder var sb strings.Builder
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
sb.WriteRune('a' + rune(rand.Int31())%('z'-'a'+1)) sb.WriteRune('a' + rune(rand.Int())%('z'-'a'+1))
} }
return sb.String() return sb.String()
} }
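
The change from rand.Int() to rand.Int31() in randString fixes a real defect: converting a 63-bit rand.Int() result to rune truncates it to int32, which can be negative, and a negative remainder pushes the rune below 'a'. Int31 never goes negative. On Go 1.22+, math/rand/v2 states the intent even more directly; an illustrative sketch:

package main

import (
	"fmt"
	"math/rand/v2"
	"strings"
)

// randString draws each rune uniformly from 'a'..'z'.
func randString(n int) string {
	var sb strings.Builder
	for i := 0; i < n; i++ {
		// IntN returns a value in [0, 26), so no sign or truncation
		// concerns arise before the rune conversion.
		sb.WriteRune('a' + rune(rand.IntN('z'-'a'+1)))
	}
	return sb.String()
}

func main() { fmt.Println(randString(10)) }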

View file

@ -44,7 +44,7 @@ func NewObjRegistry(ctx context.Context, dbFilePath string) *ObjRegistry {
} }
func (o *ObjRegistry) AddObject(cid, oid, s3Bucket, s3Key, payloadHash string) error { func (o *ObjRegistry) AddObject(cid, oid, s3Bucket, s3Key, payloadHash string) error {
return o.boltDB.Batch(func(tx *bbolt.Tx) error { return o.boltDB.Update(func(tx *bbolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(statusCreated)) b, err := tx.CreateBucketIfNotExists([]byte(statusCreated))
if err != nil { if err != nil {
return err return err
@ -75,7 +75,7 @@ func (o *ObjRegistry) AddObject(cid, oid, s3Bucket, s3Key, payloadHash string) e
} }
func (o *ObjRegistry) SetObjectStatus(id uint64, oldStatus, newStatus string) error { func (o *ObjRegistry) SetObjectStatus(id uint64, oldStatus, newStatus string) error {
return o.boltDB.Batch(func(tx *bbolt.Tx) error { return o.boltDB.Update(func(tx *bbolt.Tx) error {
oldB := tx.Bucket([]byte(oldStatus)) oldB := tx.Bucket([]byte(oldStatus))
if oldB == nil { if oldB == nil {
return fmt.Errorf("bucket doesn't exist: '%s'", oldStatus) return fmt.Errorf("bucket doesn't exist: '%s'", oldStatus)
@ -110,7 +110,7 @@ func (o *ObjRegistry) SetObjectStatus(id uint64, oldStatus, newStatus string) er
} }
func (o *ObjRegistry) DeleteObject(id uint64) error { func (o *ObjRegistry) DeleteObject(id uint64) error {
return o.boltDB.Batch(func(tx *bbolt.Tx) error { return o.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.ForEach(func(_ []byte, b *bbolt.Bucket) error { return tx.ForEach(func(_ []byte, b *bbolt.Bucket) error {
return b.Delete(encodeId(id)) return b.Delete(encodeId(id))
}) })
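
The Update → Batch switch in this file is about commit batching, not semantics: bbolt's DB.Batch may coalesce concurrently submitted functions into one transaction, which suits many k6 VUs writing at once, while DB.Update commits every call on its own. A short sketch against the real go.etcd.io/bbolt API (file and key names are made up):

package main

import (
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	db, err := bbolt.Open("registry.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	write := func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("created"))
		if err != nil {
			return err
		}
		return b.Put([]byte("id-1"), []byte("object info"))
	}

	// Update: one transaction and one commit per call.
	if err := db.Update(write); err != nil {
		log.Fatal(err)
	}

	// Batch: concurrent callers may share a commit; the function can be
	// retried by bbolt, so it has to stay idempotent.
	if err := db.Batch(write); err != nil {
		log.Fatal(err)
	}
}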

View file

@ -3,15 +3,12 @@ package registry
import ( import (
"context" "context"
"fmt" "fmt"
"sync"
"time" "time"
"github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt" "go.etcd.io/bbolt"
) )
const nextObjectTimeout = 10 * time.Second
type ObjFilter struct { type ObjFilter struct {
Status string Status string
Age int Age int
@ -23,9 +20,6 @@ type ObjSelector struct {
boltDB *bbolt.DB boltDB *bbolt.DB
filter *ObjFilter filter *ObjFilter
cacheSize int cacheSize int
kind SelectorKind
// Sync synchronizes VU used for deletion.
Sync sync.WaitGroup
} }
// objectSelectCache is the default maximum size of a batch to select from DB. // objectSelectCache is the default maximum size of a batch to select from DB.
@ -33,7 +27,7 @@ const objectSelectCache = 1000
// NewObjSelector creates a new instance of object selector that can iterate over // NewObjSelector creates a new instance of object selector that can iterate over
// objects in the specified registry. // objects in the specified registry.
func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind, filter *ObjFilter) *ObjSelector { func NewObjSelector(registry *ObjRegistry, selectionSize int, filter *ObjFilter) *ObjSelector {
if selectionSize <= 0 { if selectionSize <= 0 {
selectionSize = objectSelectCache selectionSize = objectSelectCache
} }
@ -46,7 +40,6 @@ func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind,
filter: filter, filter: filter,
objChan: make(chan *ObjectInfo, selectionSize*2), objChan: make(chan *ObjectInfo, selectionSize*2),
cacheSize: selectionSize, cacheSize: selectionSize,
kind: kind,
} }
go objSelector.selectLoop() go objSelector.selectLoop()
@ -62,21 +55,12 @@ func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind,
// - underlying registry context is done, nil objects will be returned on the // - underlying registry context is done, nil objects will be returned on the
// currently blocked call and on every further NextObject call. // currently blocked call and on every further NextObject call.
func (o *ObjSelector) NextObject() *ObjectInfo { func (o *ObjSelector) NextObject() *ObjectInfo {
if o.kind == SelectorOneshot {
return <-o.objChan return <-o.objChan
}
select {
case <-time.After(nextObjectTimeout):
return nil
case obj := <-o.objChan:
return obj
}
} }
// Count returns total number of objects that match filter of the selector. // Count returns total number of objects that match filter of the selector.
func (o *ObjSelector) Count() (int, error) { func (o *ObjSelector) Count() (int, error) {
count := 0 var count = 0
err := o.boltDB.View(func(tx *bbolt.Tx) error { err := o.boltDB.View(func(tx *bbolt.Tx) error {
b := tx.Bucket([]byte(o.filter.Status)) b := tx.Bucket([]byte(o.filter.Status))
if b == nil { if b == nil {
@ -176,23 +160,15 @@ func (o *ObjSelector) selectLoop() {
} }
} }
if o.kind == SelectorOneshot && len(cache) != o.cacheSize { if len(cache) != o.cacheSize {
return
}
if o.kind != SelectorLooped && len(cache) != o.cacheSize {
// no more objects, wait a little; the logic could be improved. // no more objects, wait a little; the logic could be improved.
select { select {
case <-time.After(time.Second): case <-time.After(time.Second * time.Duration(o.filter.Age/2)):
case <-o.ctx.Done(): case <-o.ctx.Done():
return return
} }
} }
if o.kind == SelectorLooped && len(cache) != o.cacheSize {
lastID = 0
}
// clean handled objects // clean handled objects
cache = cache[:0] cache = cache[:0]
} }
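
The reworked NextObject above boils down to one select: oneshot selectors still block on the channel, all other kinds give up after nextObjectTimeout and hand back nil. The shape in isolation (standard library only, illustrative type):

package main

import (
	"fmt"
	"time"
)

type objectInfo struct{ ID uint64 }

// nextObject waits up to timeout for the selector channel to yield.
func nextObject(ch <-chan *objectInfo, timeout time.Duration) *objectInfo {
	select {
	case obj := <-ch:
		return obj // nil here means the channel was closed
	case <-time.After(timeout):
		return nil // no object became available in time
	}
}

func main() {
	ch := make(chan *objectInfo, 1)
	ch <- &objectInfo{ID: 42}
	fmt.Println(nextObject(ch, time.Second).ID)              // 42
	fmt.Println(nextObject(ch, 10*time.Millisecond) == nil) // true: timed out
}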

View file

@ -74,35 +74,7 @@ func (r *Registry) open(dbFilePath string) *ObjRegistry {
return registry return registry
} }
// SelectorKind represents selector behaviour when no items are available.
type SelectorKind byte
const (
// SelectorAwaiting waits for a new item to arrive.
// This selector visits each item exactly once and can be used when items
// to select are being pushed into registry concurrently.
SelectorAwaiting = iota
// SelectorLooped rewinds cursor to the start after all items have been read.
// It can encounter duplicates and should be used mostly for read scenarios.
SelectorLooped
// SelectorOneshot visits each item exactly once and exits immediately afterwards.
// It may be used to artificially abort the test after all items were processed.
SelectorOneshot
)
func (r *Registry) GetSelector(dbFilePath string, name string, cacheSize int, filter map[string]string) *ObjSelector { func (r *Registry) GetSelector(dbFilePath string, name string, cacheSize int, filter map[string]string) *ObjSelector {
return r.getSelectorInternal(dbFilePath, name, cacheSize, SelectorAwaiting, filter)
}
func (r *Registry) GetLoopedSelector(dbFilePath string, name string, cacheSize int, filter map[string]string) *ObjSelector {
return r.getSelectorInternal(dbFilePath, name, cacheSize, SelectorLooped, filter)
}
func (r *Registry) GetOneshotSelector(dbFilePath string, name string, cacheSize int, filter map[string]string) *ObjSelector {
return r.getSelectorInternal(dbFilePath, name, cacheSize, SelectorOneshot, filter)
}
func (r *Registry) getSelectorInternal(dbFilePath string, name string, cacheSize int, kind SelectorKind, filter map[string]string) *ObjSelector {
objFilter, err := parseFilter(filter) objFilter, err := parseFilter(filter)
if err != nil { if err != nil {
panic(err) panic(err)
@ -114,7 +86,7 @@ func (r *Registry) getSelectorInternal(dbFilePath string, name string, cacheSize
selector := r.root.selectors[name] selector := r.root.selectors[name]
if selector == nil { if selector == nil {
registry := r.open(dbFilePath) registry := r.open(dbFilePath)
selector = NewObjSelector(registry, cacheSize, kind, objFilter) selector = NewObjSelector(registry, cacheSize, objFilter)
r.root.selectors[name] = selector r.root.selectors[name] = selector
} else if !reflect.DeepEqual(selector.filter, objFilter) { } else if !reflect.DeepEqual(selector.filter, objFilter) {
panic(fmt.Sprintf("selector %s already has been created with a different filter", name)) panic(fmt.Sprintf("selector %s already has been created with a different filter", name))
@ -122,10 +94,6 @@ func (r *Registry) getSelectorInternal(dbFilePath string, name string, cacheSize
return selector return selector
} }
func (r *Registry) GetExporter(selector *ObjSelector) *ObjExporter {
return NewObjExporter(selector)
}
func parseFilter(filter map[string]string) (*ObjFilter, error) { func parseFilter(filter map[string]string) (*ObjFilter, error) {
objFilter := ObjFilter{} objFilter := ObjFilter{}
objFilter.Status = filter["status"] objFilter.Status = filter["status"]
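
The three SelectorKind values introduced in this file differ only in their end-of-data policy: awaiting blocks until more items are pushed, looped rewinds to the beginning, oneshot stops for good. A toy in-memory iterator making the policies concrete (a sketch, not the bbolt-backed implementation):

package main

import "fmt"

type selectorKind byte

const (
	selectorAwaiting selectorKind = iota // block until new items arrive
	selectorLooped                       // rewind after the last item
	selectorOneshot                      // visit each item once, then stop
)

type toySelector struct {
	items []string
	pos   int
	kind  selectorKind
}

// next applies the kind's end-of-data policy. For selectorAwaiting a
// real implementation blocks on a channel; this toy just reports false.
func (s *toySelector) next() (string, bool) {
	if s.pos == len(s.items) {
		if s.kind != selectorLooped {
			return "", false
		}
		s.pos = 0 // looped: duplicates are possible by design
	}
	v := s.items[s.pos]
	s.pos++
	return v, true
}

func main() {
	s := toySelector{items: []string{"a", "b"}, kind: selectorLooped}
	for i := 0; i < 5; i++ {
		v, _ := s.next()
		fmt.Print(v, " ") // a b a b a
	}
	fmt.Println()
}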

View file

@ -1,19 +1,18 @@
package s3 package s3
import ( import (
"bytes"
"context" "context"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"fmt"
"strconv" "strconv"
"time" "time"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats" "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/dop251/goja"
"go.k6.io/k6/js/modules" "go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics" "go.k6.io/k6/metrics"
) )
@ -50,9 +49,9 @@ type (
} }
) )
func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse { func (c *Client) Put(bucket, key string, payload goja.ArrayBuffer) PutResponse {
rdr := payload.Reader() rdr := bytes.NewReader(payload.Bytes())
sz := payload.Size() sz := rdr.Size()
start := time.Now() start := time.Now()
_, err := c.cli.PutObject(c.vu.Context(), &s3.PutObjectInput{ _, err := c.cli.PutObject(c.vu.Context(), &s3.PutObjectInput{
@ -65,44 +64,9 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
return PutResponse{Success: false, Error: err.Error()} return PutResponse{Success: false, Error: err.Error()}
} }
stats.Report(c.vu, objPutSuccess, 1) stats.Report(c.vu, objPutTotal, 1)
stats.ReportDataSent(c.vu, float64(sz)) stats.ReportDataSent(c.vu, float64(sz))
stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start)))
stats.Report(c.vu, objPutData, float64(sz))
return PutResponse{Success: true}
}
const multipartUploadMinPartSize = 5 * 1024 * 1024 // 5MB
func (c *Client) Multipart(bucket, key string, objPartSize, concurrency int, payload datagen.Payload) PutResponse {
if objPartSize < multipartUploadMinPartSize {
stats.Report(c.vu, objPutFails, 1)
return PutResponse{Success: false, Error: fmt.Sprintf("part size '%d' must be greater than '%d'(5 MB)", objPartSize, multipartUploadMinPartSize)}
}
start := time.Now()
uploader := manager.NewUploader(c.cli, func(u *manager.Uploader) {
u.PartSize = int64(objPartSize)
u.Concurrency = concurrency
})
payloadReader := payload.Reader()
sz := payload.Size()
_, err := uploader.Upload(c.vu.Context(), &s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
Body: payloadReader,
})
if err != nil {
stats.Report(c.vu, objPutFails, 1)
return PutResponse{Success: false, Error: err.Error()}
}
stats.Report(c.vu, objPutSuccess, 1)
stats.ReportDataSent(c.vu, float64(sz))
stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start)))
stats.Report(c.vu, objPutData, float64(sz))
return PutResponse{Success: true} return PutResponse{Success: true}
} }
@ -118,7 +82,7 @@ func (c *Client) Delete(bucket, key string) DeleteResponse {
return DeleteResponse{Success: false, Error: err.Error()} return DeleteResponse{Success: false, Error: err.Error()}
} }
stats.Report(c.vu, objDeleteSuccess, 1) stats.Report(c.vu, objDeleteTotal, 1)
stats.Report(c.vu, objDeleteDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objDeleteDuration, metrics.D(time.Since(start)))
return DeleteResponse{Success: true} return DeleteResponse{Success: true}
} }
@ -126,7 +90,7 @@ func (c *Client) Delete(bucket, key string) DeleteResponse {
func (c *Client) Get(bucket, key string) GetResponse { func (c *Client) Get(bucket, key string) GetResponse {
start := time.Now() start := time.Now()
objSize := 0 var objSize = 0
err := get(c.cli, bucket, key, func(chunk []byte) { err := get(c.cli, bucket, key, func(chunk []byte) {
objSize += len(chunk) objSize += len(chunk)
}) })
@ -135,77 +99,12 @@ func (c *Client) Get(bucket, key string) GetResponse {
return GetResponse{Success: false, Error: err.Error()} return GetResponse{Success: false, Error: err.Error()}
} }
stats.Report(c.vu, objGetSuccess, 1) stats.Report(c.vu, objGetTotal, 1)
stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start)))
stats.ReportDataReceived(c.vu, float64(objSize)) stats.ReportDataReceived(c.vu, float64(objSize))
stats.Report(c.vu, objGetData, float64(objSize))
return GetResponse{Success: true} return GetResponse{Success: true}
} }
// DeleteObjectVersion deletes the object version with the specified versionID.
// If the version argument is empty, it deletes all versions and delete-markers of the specified object.
func (c *Client) DeleteObjectVersion(bucket, key, version string) DeleteResponse {
var toDelete []types.ObjectIdentifier
if version != "" {
toDelete = append(toDelete, types.ObjectIdentifier{
Key: aws.String(key),
VersionId: aws.String(version),
})
} else {
versions, err := c.cli.ListObjectVersions(c.vu.Context(), &s3.ListObjectVersionsInput{
Bucket: aws.String(bucket),
Prefix: aws.String(key),
})
if err != nil {
stats.Report(c.vu, objDeleteFails, 1)
return DeleteResponse{Success: false, Error: err.Error()}
}
toDelete = filterObjectVersions(versions, key)
}
if len(toDelete) == 0 {
return c.Delete(bucket, key)
} else {
_, err := c.cli.DeleteObjects(c.vu.Context(), &s3.DeleteObjectsInput{
Bucket: aws.String(bucket),
Delete: &types.Delete{
Objects: toDelete,
Quiet: true,
},
})
if err != nil {
stats.Report(c.vu, objDeleteFails, 1)
return DeleteResponse{Success: false, Error: err.Error()}
}
}
return DeleteResponse{Success: true}
}
func filterObjectVersions(versions *s3.ListObjectVersionsOutput, key string) []types.ObjectIdentifier {
var result []types.ObjectIdentifier
for _, v := range versions.Versions {
if *v.Key == key {
result = append(result, types.ObjectIdentifier{
Key: v.Key,
VersionId: v.VersionId,
})
}
}
for _, marker := range versions.DeleteMarkers {
if *marker.Key == key {
result = append(result, types.ObjectIdentifier{
Key: marker.Key,
VersionId: marker.VersionId,
})
}
}
return result
}
func get( func get(
c *s3.Client, c *s3.Client,
bucket string, bucket string,
@ -244,7 +143,7 @@ func (c *Client) VerifyHash(bucket, key, expectedHash string) VerifyHashResponse
} }
actualHash := hex.EncodeToString(hasher.Sum(nil)) actualHash := hex.EncodeToString(hasher.Sum(nil))
if actualHash != expectedHash { if actualHash != expectedHash {
return VerifyHashResponse{Success: false, Error: "hash mismatch"} return VerifyHashResponse{Success: true, Error: "hash mismatch"}
} }
return VerifyHashResponse{Success: true} return VerifyHashResponse{Success: true}
@ -279,27 +178,7 @@ func (c *Client) CreateBucket(bucket string, params map[string]string) CreateBuc
return CreateBucketResponse{Success: false, Error: err.Error()} return CreateBucketResponse{Success: false, Error: err.Error()}
} }
var versioning bool stats.Report(c.vu, createBucketTotal, 1)
if strVersioned, ok := params["versioning"]; ok {
if versioning, err = strconv.ParseBool(strVersioned); err != nil {
stats.Report(c.vu, createBucketFails, 1)
return CreateBucketResponse{Success: false, Error: err.Error()}
}
}
if versioning {
_, err = c.cli.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
Bucket: aws.String(bucket),
VersioningConfiguration: &types.VersioningConfiguration{
Status: types.BucketVersioningStatusEnabled,
},
})
if err != nil {
stats.Report(c.vu, createBucketFails, 1)
return CreateBucketResponse{Success: false, Error: err.Error()}
}
}
stats.Report(c.vu, createBucketSuccess, 1)
stats.Report(c.vu, createBucketDuration, metrics.D(time.Since(start))) stats.Report(c.vu, createBucketDuration, metrics.D(time.Since(start)))
return CreateBucketResponse{Success: true} return CreateBucketResponse{Success: true}
} }
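
One caveat worth noting in DeleteObjectVersion above: a single ListObjectVersions call returns at most one page of results (up to 1000 entries), so keys with very many versions may be handled incompletely. The v2 SDK ships a generated paginator that covers this; a hedged sketch using aws-sdk-go-v2 (bucket and key are placeholders, error handling kept minimal):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// listAllVersions collects every version and delete-marker of one key,
// following pagination instead of trusting the first page.
func listAllVersions(ctx context.Context, cli *s3.Client, bucket, key string) ([]types.ObjectIdentifier, error) {
	var result []types.ObjectIdentifier
	p := s3.NewListObjectVersionsPaginator(cli, &s3.ListObjectVersionsInput{
		Bucket: aws.String(bucket),
		Prefix: aws.String(key),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		for _, v := range page.Versions {
			if *v.Key == key {
				result = append(result, types.ObjectIdentifier{Key: v.Key, VersionId: v.VersionId})
			}
		}
		for _, m := range page.DeleteMarkers {
			if *m.Key == key {
				result = append(result, types.ObjectIdentifier{Key: m.Key, VersionId: m.VersionId})
			}
		}
	}
	return result, nil
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	ids, err := listAllVersions(ctx, s3.NewFromConfig(cfg), "my-bucket", "my-key")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(ids), "versions found")
}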

View file

@ -7,7 +7,6 @@ import (
"strconv" "strconv"
"time" "time"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3"
@ -29,10 +28,10 @@ var (
_ modules.Instance = &S3{} _ modules.Instance = &S3{}
_ modules.Module = &RootModule{} _ modules.Module = &RootModule{}
objPutSuccess, objPutFails, objPutDuration, objPutData *metrics.Metric objPutTotal, objPutFails, objPutDuration *metrics.Metric
objGetSuccess, objGetFails, objGetDuration, objGetData *metrics.Metric objGetTotal, objGetFails, objGetDuration *metrics.Metric
objDeleteSuccess, objDeleteFails, objDeleteDuration *metrics.Metric objDeleteTotal, objDeleteFails, objDeleteDuration *metrics.Metric
createBucketSuccess, createBucketFails, createBucketDuration *metrics.Metric createBucketTotal, createBucketFails, createBucketDuration *metrics.Metric
) )
func init() { func init() {
@ -95,23 +94,22 @@ func (s *S3) Connect(endpoint string, params map[string]string) (*Client, error)
}) })
// register metrics // register metrics
objPutSuccess, _ = stats.Registry.NewMetric("aws_obj_put_success", metrics.Counter) registry := metrics.NewRegistry()
objPutFails, _ = stats.Registry.NewMetric("aws_obj_put_fails", metrics.Counter) objPutTotal, _ = registry.NewMetric("aws_obj_put_total", metrics.Counter)
objPutDuration, _ = stats.Registry.NewMetric("aws_obj_put_duration", metrics.Trend, metrics.Time) objPutFails, _ = registry.NewMetric("aws_obj_put_fails", metrics.Counter)
objPutData, _ = stats.Registry.NewMetric("aws_obj_put_bytes", metrics.Counter, metrics.Data) objPutDuration, _ = registry.NewMetric("aws_obj_put_duration", metrics.Trend, metrics.Time)
objGetSuccess, _ = stats.Registry.NewMetric("aws_obj_get_success", metrics.Counter) objGetTotal, _ = registry.NewMetric("aws_obj_get_total", metrics.Counter)
objGetFails, _ = stats.Registry.NewMetric("aws_obj_get_fails", metrics.Counter) objGetFails, _ = registry.NewMetric("aws_obj_get_fails", metrics.Counter)
objGetDuration, _ = stats.Registry.NewMetric("aws_obj_get_duration", metrics.Trend, metrics.Time) objGetDuration, _ = registry.NewMetric("aws_obj_get_duration", metrics.Trend, metrics.Time)
objGetData, _ = stats.Registry.NewMetric("aws_obj_get_bytes", metrics.Counter, metrics.Data)
objDeleteSuccess, _ = stats.Registry.NewMetric("aws_obj_delete_success", metrics.Counter) objDeleteTotal, _ = registry.NewMetric("aws_obj_delete_total", metrics.Counter)
objDeleteFails, _ = stats.Registry.NewMetric("aws_obj_delete_fails", metrics.Counter) objDeleteFails, _ = registry.NewMetric("aws_obj_delete_fails", metrics.Counter)
objDeleteDuration, _ = stats.Registry.NewMetric("aws_obj_delete_duration", metrics.Trend, metrics.Time) objDeleteDuration, _ = registry.NewMetric("aws_obj_delete_duration", metrics.Trend, metrics.Time)
createBucketSuccess, _ = stats.Registry.NewMetric("aws_create_bucket_success", metrics.Counter) createBucketTotal, _ = registry.NewMetric("aws_create_bucket_total", metrics.Counter)
createBucketFails, _ = stats.Registry.NewMetric("aws_create_bucket_fails", metrics.Counter) createBucketFails, _ = registry.NewMetric("aws_create_bucket_fails", metrics.Counter)
createBucketDuration, _ = stats.Registry.NewMetric("aws_create_bucket_duration", metrics.Trend, metrics.Time) createBucketDuration, _ = registry.NewMetric("aws_create_bucket_duration", metrics.Trend, metrics.Time)
return &Client{ return &Client{
vu: s.vu, vu: s.vu,
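Both columns follow the same k6 extension pattern: custom metrics are created once at connect time and later fed from VUs. The disagreement is only over the metric names (`*_success` plus byte counters versus `*_total`) and over whether the registry is shared through the stats package or created locally. A minimal sketch of the pattern, with illustrative names:

```go
// metrics_sketch.go — custom-metric pattern both sides of this hunk rely on.
package sketch

import (
	"time"

	"go.k6.io/k6/js/modules"
	"go.k6.io/k6/metrics"
)

var (
	objPutSuccess  *metrics.Metric
	objPutDuration *metrics.Metric
)

func registerMetrics(registry *metrics.Registry) {
	// NewMetric fails only on an invalid name or a type clash, which is why
	// the module code discards the error with `_`.
	objPutSuccess, _ = registry.NewMetric("aws_obj_put_success", metrics.Counter)
	objPutDuration, _ = registry.NewMetric("aws_obj_put_duration", metrics.Trend, metrics.Time)
}

// report pushes one sample for the given metric, as stats.Report does
// (tags omitted here; the left-hand stats module attaches a shared tag set).
func report(vu modules.VU, metric *metrics.Metric, value float64) {
	metrics.PushIfNotDone(vu.Context(), vu.State().Samples, metrics.Sample{
		TimeSeries: metrics.TimeSeries{Metric: metric},
		Time:       time.Now(),
		Value:      value,
	})
}
```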

View file

@ -1,14 +1,14 @@
package s3local package s3local
import ( import (
"bytes"
"time" "time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats" "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/dop251/goja"
"go.k6.io/k6/js/modules" "go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics" "go.k6.io/k6/metrics"
) )
@ -18,13 +18,11 @@ type Client struct {
l layer.Client l layer.Client
ownerID *user.ID ownerID *user.ID
resolver layer.BucketResolver resolver layer.BucketResolver
limiter local.Limiter
} }
type ( type (
SuccessOrErrorResponse struct { SuccessOrErrorResponse struct {
Success bool Success bool
Abort bool
Error string Error string
} }
@ -34,14 +32,7 @@ type (
GetResponse SuccessOrErrorResponse GetResponse SuccessOrErrorResponse
) )
func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse { func (c *Client) Put(bucket, key string, payload goja.ArrayBuffer) PutResponse {
if c.limiter.IsFull() {
return PutResponse{
Success: false,
Abort: true,
Error: "engine size limit reached",
}
}
cid, err := c.resolver.Resolve(c.vu.Context(), bucket) cid, err := c.resolver.Resolve(c.vu.Context(), bucket)
if err != nil { if err != nil {
stats.Report(c.vu, objPutFails, 1) stats.Report(c.vu, objPutFails, 1)
@ -57,8 +48,8 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
}, },
Header: map[string]string{}, Header: map[string]string{},
Object: key, Object: key,
Size: uint64(payload.Size()), Size: int64(len(payload.Bytes())),
Reader: payload.Reader(), Reader: bytes.NewReader(payload.Bytes()),
} }
start := time.Now() start := time.Now()
@ -68,9 +59,8 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
} }
stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start)))
stats.Report(c.vu, objPutSuccess, 1) stats.Report(c.vu, objPutTotal, 1)
stats.ReportDataSent(c.vu, float64(prm.Size)) stats.ReportDataSent(c.vu, float64(prm.Size))
stats.Report(c.vu, objPutData, float64(prm.Size))
return PutResponse{Success: true} return PutResponse{Success: true}
} }
@ -108,22 +98,16 @@ func (c *Client) Get(bucket, key string) GetResponse {
Start: 0, Start: 0,
End: uint64(extInfo.ObjectInfo.Size), End: uint64(extInfo.ObjectInfo.Size),
}, },
Writer: wr,
} }
objPayload, err := c.l.GetObject(c.vu.Context(), getPrm) if err := c.l.GetObject(c.vu.Context(), getPrm); err != nil {
if err != nil {
stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()}
}
err = objPayload.StreamTo(wr)
if err != nil {
stats.Report(c.vu, objGetFails, 1) stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()} return GetResponse{Error: err.Error()}
} }
stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start)))
stats.Report(c.vu, objGetSuccess, 1) stats.Report(c.vu, objGetTotal, 1)
stats.ReportDataReceived(c.vu, wr.total) stats.ReportDataReceived(c.vu, wr.total)
stats.Report(c.vu, objGetData, wr.total)
return GetResponse{Success: true} return GetResponse{Success: true}
} }
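After streaming the object, Get reports `wr.total` as received bytes, so `wr` must be a byte-counting io.Writer. Its definition is not part of this diff; a minimal version of such a helper might look like this:

```go
// countwriter_sketch.go — assumed shape of the `wr` helper used by Get.
package sketch

import "io"

// countingWriter discards the payload but remembers how many bytes passed
// through, which is all the data metrics need.
type countingWriter struct {
	total float64
}

func (w *countingWriter) Write(p []byte) (int, error) {
	w.total += float64(len(p))
	return len(p), nil
}

var _ io.Writer = (*countingWriter)(nil)
```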

View file

@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient" "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
) )
@ -29,15 +30,15 @@ func unimplementedMessage(fname string) string {
"something other than filling a cluster (i.e. PUT or GET).", fname) "something other than filling a cluster (i.e. PUT or GET).", fname)
} }
func (*frostfs) CreateContainer(context.Context, layer.PrmContainerCreate) (*layer.ContainerCreateResult, error) { func (*frostfs) CreateContainer(context.Context, layer.PrmContainerCreate) (cid.ID, error) {
panic(unimplementedMessage("CreateContainer")) panic(unimplementedMessage("CreateContainer"))
} }
func (*frostfs) Container(ctx context.Context, prmContainer layer.PrmContainer) (*container.Container, error) { func (*frostfs) Container(context.Context, cid.ID) (*container.Container, error) {
panic(unimplementedMessage("Container")) panic(unimplementedMessage("Container"))
} }
func (*frostfs) UserContainers(ctx context.Context, containers layer.PrmUserContainers) ([]cid.ID, error) { func (*frostfs) UserContainers(context.Context, user.ID) ([]cid.ID, error) {
panic(unimplementedMessage("UserContainers")) panic(unimplementedMessage("UserContainers"))
} }
@ -45,7 +46,7 @@ func (*frostfs) SetContainerEACL(context.Context, eacl.Table, *session.Container
panic(unimplementedMessage("SetContainerEACL")) panic(unimplementedMessage("SetContainerEACL"))
} }
func (*frostfs) ContainerEACL(ctx context.Context, containerEACL layer.PrmContainerEACL) (*eacl.Table, error) { func (*frostfs) ContainerEACL(context.Context, cid.ID) (*eacl.Table, error) {
panic(unimplementedMessage("ContainerEACL")) panic(unimplementedMessage("ContainerEACL"))
} }
@ -77,7 +78,7 @@ func (f *frostfs) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (
for _, attr := range prm.Attributes { for _, attr := range prm.Attributes {
hdrs[attr[0]] = attr[1] hdrs[attr[0]] = attr[1]
} }
return f.Put(ctx, prm.Container, nil, hdrs, payload) return f.Put(ctx, prm.Container, &prm.Creator, hdrs, payload)
} }
func (f *frostfs) DeleteObject(context.Context, layer.PrmObjectDelete) error { func (f *frostfs) DeleteObject(context.Context, layer.PrmObjectDelete) error {
@ -87,7 +88,3 @@ func (f *frostfs) DeleteObject(context.Context, layer.PrmObjectDelete) error {
func (f *frostfs) TimeToEpoch(ctx context.Context, now time.Time, future time.Time) (uint64, uint64, error) { func (f *frostfs) TimeToEpoch(ctx context.Context, now time.Time, future time.Time) (uint64, uint64, error) {
panic(unimplementedMessage("TimeToEpoch")) panic(unimplementedMessage("TimeToEpoch"))
} }
func (f *frostfs) SearchObjects(ctx context.Context, search layer.PrmObjectSearch) ([]oid.ID, error) {
panic(unimplementedMessage("SearchObjects"))
}

View file

@ -32,10 +32,10 @@ var (
_ modules.Module = &RootModule{} _ modules.Module = &RootModule{}
_ modules.Instance = &Local{} _ modules.Instance = &Local{}
internalObjPutSuccess, internalObjPutFails, internalObjPutDuration, internalObjPutData *metrics.Metric internalObjPutTotal, internalObjPutFails, internalObjPutDuration *metrics.Metric
internalObjGetSuccess, internalObjGetFails, internalObjGetDuration, internalObjGetData *metrics.Metric internalObjGetTotal, internalObjGetFails, internalObjGetDuration *metrics.Metric
objPutSuccess, objPutFails, objPutDuration, objPutData *metrics.Metric objPutTotal, objPutFails, objPutDuration *metrics.Metric
objGetSuccess, objGetFails, objGetDuration, objGetData *metrics.Metric objGetTotal, objGetFails, objGetDuration *metrics.Metric
) )
func init() { func init() {
@ -56,7 +56,7 @@ func (s *Local) Exports() modules.Exports {
return modules.Exports{Default: s} return modules.Exports{Default: s}
} }
func (s *Local) Connect(configFile string, configDir string, params map[string]string, bucketMapping map[string]string, maxSizeGB int64) (*Client, error) { func (s *Local) Connect(configFile string, params map[string]string, bucketMapping map[string]string) (*Client, error) {
// Parse configuration flags. // Parse configuration flags.
fs := flag.NewFlagSet("s3local", flag.ContinueOnError) fs := flag.NewFlagSet("s3local", flag.ContinueOnError)
@ -88,37 +88,35 @@ func (s *Local) Connect(configFile string, configDir string, params map[string]s
} }
// Register metrics. // Register metrics.
internalObjPutSuccess, _ = stats.Registry.NewMetric("s3local_internal_obj_put_success", metrics.Counter) registry := metrics.NewRegistry()
internalObjPutFails, _ = stats.Registry.NewMetric("s3local_internal_obj_put_fails", metrics.Counter)
internalObjPutDuration, _ = stats.Registry.NewMetric("s3local_internal_obj_put_duration", metrics.Trend, metrics.Time)
internalObjPutData, _ = stats.Registry.NewMetric("s3local_internal_obj_put_bytes", metrics.Counter, metrics.Data)
internalObjGetSuccess, _ = stats.Registry.NewMetric("s3local_internal_obj_get_success", metrics.Counter) internalObjPutTotal, _ = registry.NewMetric("s3local_internal_obj_put_total", metrics.Counter)
internalObjGetFails, _ = stats.Registry.NewMetric("s3local_internal_obj_get_fails", metrics.Counter) internalObjPutFails, _ = registry.NewMetric("s3local_internal_obj_put_fails", metrics.Counter)
internalObjGetDuration, _ = stats.Registry.NewMetric("s3local_internal_obj_get_duration", metrics.Trend, metrics.Time) internalObjPutDuration, _ = registry.NewMetric("s3local_internal_obj_put_duration", metrics.Trend, metrics.Time)
internalObjGetData, _ = stats.Registry.NewMetric("s3local_internal_obj_get_bytes", metrics.Counter, metrics.Data)
objPutSuccess, _ = stats.Registry.NewMetric("s3local_obj_put_success", metrics.Counter) internalObjGetTotal, _ = registry.NewMetric("s3local_internal_obj_get_total", metrics.Counter)
objPutFails, _ = stats.Registry.NewMetric("s3local_obj_put_fails", metrics.Counter) internalObjGetFails, _ = registry.NewMetric("s3local_internal_obj_get_fails", metrics.Counter)
objPutDuration, _ = stats.Registry.NewMetric("s3local_obj_put_duration", metrics.Trend, metrics.Time) internalObjGetDuration, _ = registry.NewMetric("s3local_internal_obj_get_duration", metrics.Trend, metrics.Time)
objPutData, _ = stats.Registry.NewMetric("s3local_obj_put_bytes", metrics.Counter, metrics.Data)
objGetSuccess, _ = stats.Registry.NewMetric("s3local_obj_get_success", metrics.Counter) objPutTotal, _ = registry.NewMetric("s3local_obj_put_total", metrics.Counter)
objGetFails, _ = stats.Registry.NewMetric("s3local_obj_get_fails", metrics.Counter) objPutFails, _ = registry.NewMetric("s3local_obj_put_fails", metrics.Counter)
objGetDuration, _ = stats.Registry.NewMetric("s3local_obj_get_duration", metrics.Trend, metrics.Time) objPutDuration, _ = registry.NewMetric("s3local_obj_put_duration", metrics.Trend, metrics.Time)
objGetData, _ = stats.Registry.NewMetric("s3local_obj_get_bytes", metrics.Counter, metrics.Data)
objGetTotal, _ = registry.NewMetric("s3local_obj_get_total", metrics.Counter)
objGetFails, _ = registry.NewMetric("s3local_obj_get_fails", metrics.Counter)
objGetDuration, _ = registry.NewMetric("s3local_obj_get_duration", metrics.Trend, metrics.Time)
// Create S3 layer backed by local storage engine and tree service. // Create S3 layer backed by local storage engine and tree service.
ng, limiter, err := s.l.ResolveEngine(s.l.VU().Context(), configFile, configDir, *debugLogger, maxSizeGB) ng, err := s.l.ResolveEngine(s.l.VU().Context(), configFile, *debugLogger)
if err != nil { if err != nil {
return nil, fmt.Errorf("connecting to engine for config - file %q dir %q: %v", configFile, configDir, err) return nil, fmt.Errorf("connecting to engine for config %q: %v", configFile, err)
} }
treeSvc := tree.NewTree(treeServiceEngineWrapper{ treeSvc := tree.NewTree(treeServiceEngineWrapper{
ng: ng, ng: ng,
pos: *nodePosition, pos: *nodePosition,
size: *nodeCount, size: *nodeCount,
}, zap.L()) })
rc := rawclient.New(ng, rc := rawclient.New(ng,
rawclient.WithKey(key.PrivateKey), rawclient.WithKey(key.PrivateKey),
@ -126,18 +124,16 @@ func (s *Local) Connect(configFile string, configDir string, params map[string]s
if err != nil { if err != nil {
stats.Report(s.l.VU(), internalObjPutFails, 1) stats.Report(s.l.VU(), internalObjPutFails, 1)
} else { } else {
stats.Report(s.l.VU(), internalObjPutSuccess, 1) stats.Report(s.l.VU(), internalObjPutTotal, 1)
stats.Report(s.l.VU(), internalObjPutDuration, metrics.D(dt)) stats.Report(s.l.VU(), internalObjPutDuration, metrics.D(dt))
stats.Report(s.l.VU(), internalObjPutData, float64(sz))
} }
}), }),
rawclient.WithGetHandler(func(sz uint64, err error, dt time.Duration) { rawclient.WithGetHandler(func(sz uint64, err error, dt time.Duration) {
if err != nil { if err != nil {
stats.Report(s.l.VU(), internalObjGetFails, 1) stats.Report(s.l.VU(), internalObjGetFails, 1)
} else { } else {
stats.Report(s.l.VU(), internalObjGetSuccess, 1) stats.Report(s.l.VU(), internalObjGetTotal, 1)
stats.Report(s.l.VU(), internalObjGetDuration, metrics.D(dt)) stats.Report(s.l.VU(), internalObjGetDuration, metrics.D(dt))
stats.Report(s.l.VU(), internalObjGetData, float64(sz))
} }
}), }),
) )
@ -148,24 +144,20 @@ func (s *Local) Connect(configFile string, configDir string, params map[string]s
} }
cfg := &layer.Config{ cfg := &layer.Config{
Cache: layer.NewCache(layer.DefaultCachesConfigs(zap.L())), Caches: layer.DefaultCachesConfigs(zap.L()),
AnonKey: layer.AnonymousKey{Key: key}, AnonKey: layer.AnonymousKey{Key: key},
Resolver: resolver, Resolver: resolver,
TreeService: treeSvc, TreeService: treeSvc,
} }
l := layer.NewLayer(zap.L(), &frostfs{rc}, cfg) l := layer.NewLayer(zap.L(), &frostfs{rc}, cfg)
err = l.Initialize(s.l.VU().Context(), nopEventListener{}) l.Initialize(s.l.VU().Context(), nopEventListener{})
if err != nil {
return nil, fmt.Errorf("initialize: %w", err)
}
return &Client{ return &Client{
vu: s.l.VU(), vu: s.l.VU(),
l: l, l: l,
ownerID: rc.OwnerID(), ownerID: rc.OwnerID(),
resolver: resolver, resolver: resolver,
limiter: limiter,
}, nil }, nil
} }
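On the left, Connect threads a Limiter from ResolveEngine into the Client so that Put can abort the test once the engine holds maxSizeGB of payload (see the `c.limiter.IsFull()` branch earlier in this diff). The Limiter type itself is outside the diff; the following is a sketch reconstructed from its use sites, with assumed field and method names:

```go
// limiter_sketch.go — assumed shape of the engine size limiter.
package sketch

import "sync/atomic"

// Limiter reports whether the local engine reached its configured capacity.
type Limiter interface {
	IsFull() bool
}

type sizeLimiter struct {
	current atomic.Int64
	limit   int64 // bytes; 0 disables the check
}

// Add accounts for a successful write of n bytes.
func (l *sizeLimiter) Add(n int64) { l.current.Add(n) }

func (l *sizeLimiter) IsFull() bool {
	return l.limit > 0 && l.current.Load() >= l.limit
}

var _ Limiter = (*sizeLimiter)(nil)
```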

View file

@ -115,7 +115,7 @@ func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.
return fmt.Errorf("getting children: %v", err) return fmt.Errorf("getting children: %v", err)
} }
for _, child := range children { for _, child := range children {
if err := traverse(child.ID, curDepth+1); err != nil { if err := traverse(child, curDepth+1); err != nil {
return err return err
} }
} }
@ -191,12 +191,6 @@ func (s treeServiceEngineWrapper) RemoveNode(ctx context.Context, bktInfo *data.
return err return err
} }
func (s treeServiceEngineWrapper) GetSubTreeStream(
ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32,
) (tree.SubTreeStream, error) {
panic(unimplementedMessage("TreeService.GetSubTreeStream"))
}
func mapToKV(m map[string]string) []pilorama.KeyValue { func mapToKV(m map[string]string) []pilorama.KeyValue {
var kvs []pilorama.KeyValue var kvs []pilorama.KeyValue
for k, v := range m { for k, v := range m {

View file

@ -1,54 +1,16 @@
package stats package stats
import ( import (
"strings"
"time" "time"
"go.k6.io/k6/js/modules" "go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics" "go.k6.io/k6/metrics"
) )
// RootModule is the global module object type. It is instantiated once per test
// run and will be used to create k6/x/frostfs/stats module instances for each VU.
type RootModule struct {
Instance string
}
var (
tagSet *metrics.TagSet
Registry *metrics.Registry
)
func init() {
Registry = metrics.NewRegistry()
tagSet = Registry.RootTagSet()
modules.Register("k6/x/frostfs/stats", &RootModule{})
}
// SetTags sets additional tags to custom metrics.
// Format: "key1:value1;key2:value2".
// Panics if input has invalid format.
func (m *RootModule) SetTags(labels string) {
kv := make(map[string]string)
pairs := strings.Split(labels, ";")
for _, pair := range pairs {
items := strings.Split(pair, ":")
if len(items) != 2 {
panic("invalid labels format")
}
kv[strings.TrimSpace(items[0])] = strings.TrimSpace(items[1])
}
for k, v := range kv {
tagSet = tagSet.With(k, v)
}
}
func Report(vu modules.VU, metric *metrics.Metric, value float64) { func Report(vu modules.VU, metric *metrics.Metric, value float64) {
metrics.PushIfNotDone(vu.Context(), vu.State().Samples, metrics.Sample{ metrics.PushIfNotDone(vu.Context(), vu.State().Samples, metrics.Sample{
TimeSeries: metrics.TimeSeries{ TimeSeries: metrics.TimeSeries{
Metric: metric, Metric: metric,
Tags: tagSet,
}, },
Time: time.Now(), Time: time.Now(),
Value: value, Value: value,
@ -60,11 +22,9 @@ func ReportDataReceived(vu modules.VU, value float64) {
metrics.Sample{ metrics.Sample{
TimeSeries: metrics.TimeSeries{ TimeSeries: metrics.TimeSeries{
Metric: &metrics.Metric{}, Metric: &metrics.Metric{},
Tags: tagSet,
}, },
Value: value, Value: value,
Time: time.Now(), Time: time.Now()},
},
) )
} }
@ -74,10 +34,8 @@ func ReportDataSent(vu modules.VU, value float64) {
metrics.Sample{ metrics.Sample{
TimeSeries: metrics.TimeSeries{ TimeSeries: metrics.TimeSeries{
Metric: &metrics.Metric{}, Metric: &metrics.Metric{},
Tags: tagSet,
}, },
Value: value, Value: value,
Time: time.Now(), Time: time.Now()},
},
) )
} }
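The left-hand stats module keeps a package-level registry plus a tag set that SetTags extends from a "key1:value1;key2:value2" string, so every pushed sample carries the same labels. The parsing step in isolation:

```go
// settags_sketch.go — the label format SetTags accepts, as a standalone parser.
package sketch

import "strings"

// parseTags splits a label string into key/value pairs, panicking on
// malformed input just like the module code above.
func parseTags(labels string) map[string]string {
	kv := make(map[string]string)
	for _, pair := range strings.Split(labels, ";") {
		items := strings.Split(pair, ":")
		if len(items) != 2 {
			panic("invalid labels format")
		}
		kv[strings.TrimSpace(items[0])] = strings.TrimSpace(items[1])
	}
	return kv
}
```

From a scenario this is driven by `stats.setTags(__ENV.METRIC_TAGS)`, as the JS files below show.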

View file

@ -1,6 +0,0 @@
package version
var (
// Version is the xk6 command-line utils version.
Version = "dev"
)

View file

@ -1,66 +1,57 @@
import { sleep } from 'k6'; import datagen from 'k6/x/frostfs/datagen';
import { SharedArray } from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import native from 'k6/x/frostfs/native'; import native from 'k6/x/frostfs/native';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry'; import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats'; import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { newGenerator } from './libs/datagen.js';
import { parseEnv } from './libs/env-parser.js';
import { textSummary } from './libs/k6-summary-0.0.2.js'; import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js'; import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv(); parseEnv();
const obj_list = new SharedArray( const obj_list = new SharedArray('obj_list', function () {
'obj_list', return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
function () { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; }); });
const container_list = new SharedArray( const container_list = new SharedArray('container_list', function () {
'container_list', return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
function () { return JSON.parse(open(__ENV.PREGEN_JSON)).containers; }); });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json'; const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random gRPC endpoint for current VU // Select random gRPC endpoint for current VU
const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(','); const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpc_endpoint = const grpc_endpoint = grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)]; const grpc_client = native.connect(grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5, __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60);
const grpc_client = native.connect( const log = logging.new().withField("endpoint", grpc_endpoint);
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
const log = logging.new().withField('endpoint', grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE; const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION; const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) { const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
stats.setTags(__ENV.METRIC_TAGS) let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
} }
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined; const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE), __ENV.PAYLOAD_TYPE || "");
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
}
const scenarios = {}; const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0'); const write_vu_count = parseInt(__ENV.WRITERS || '0');
const write_grpc_chunk_size = 1024 * parseInt(__ENV.GRPC_CHUNK_SIZE || '0')
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) { if (write_vu_count > 0) {
scenarios.write = { scenarios.write = {
executor: 'constant-vus', executor: 'constant-vus',
@ -71,23 +62,6 @@ if (write_vu_count > 0) {
}; };
} }
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_exit_on_null = write_vu_count == 0;
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
obj_to_delete_selector =
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
}
const read_vu_count = parseInt(__ENV.READERS || '0'); const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) { if (read_vu_count > 0) {
scenarios.read = { scenarios.read = {
@ -102,8 +76,7 @@ if (read_vu_count > 0) {
const delete_vu_count = parseInt(__ENV.DELETERS || '0'); const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) { if (delete_vu_count > 0) {
if (!obj_to_delete_selector) { if (!obj_to_delete_selector) {
throw new Error( throw new Error('Positive DELETE worker number without a proper object selector');
'Positive DELETE worker number without a proper object selector');
} }
scenarios.delete = { scenarios.delete = {
@ -130,19 +103,12 @@ export function setup() {
console.log(`Writing VUs: ${write_vu_count}`); console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`); console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`); console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
} }
export function teardown(data) { export function teardown(data) {
if (obj_registry) { if (obj_registry) {
obj_registry.close(); obj_registry.close();
} }
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
} }
export function handleSummary(data) { export function handleSummary(data) {
@ -157,20 +123,20 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE); sleep(__ENV.SLEEP_WRITE);
} }
const headers = { unique_header: uuidv4() }; const headers = {
const container = unique_header: uuidv4()
container_list[Math.floor(Math.random() * container_list.length)]; };
const container = container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload(); const { payload, hash } = generator.genPayload(registry_enabled);
const resp = const resp = grpc_client.put(container, headers, payload);
grpc_client.put(container, headers, payload, write_grpc_chunk_size);
if (!resp.success) { if (!resp.success) {
log.withField('cid', container).error(resp.error); log.withField("cid", container).error(resp.error);
return; return;
} }
if (obj_registry) { if (obj_registry) {
obj_registry.addObject(container, resp.object_id, '', '', payload.hash()); obj_registry.addObject(container, resp.object_id, "", "", hash);
} }
} }
@ -179,22 +145,10 @@ export function obj_read() {
sleep(__ENV.SLEEP_READ); sleep(__ENV.SLEEP_READ);
} }
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = grpc_client.get(obj.c_id, obj.o_id)
if (!resp.success) {
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)]; const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object) const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) { if (!resp.success) {
log.withFields({ cid: obj.container, oid: obj.object }).error(resp.error); log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
} }
} }
@ -205,16 +159,13 @@ export function obj_delete() {
const obj = obj_to_delete_selector.nextObject(); const obj = obj_to_delete_selector.nextObject();
if (!obj) { if (!obj) {
if (obj_to_delete_exit_on_null) {
exec.test.abort("No more objects to select");
}
return; return;
} }
const resp = grpc_client.delete(obj.c_id, obj.o_id); const resp = grpc_client.delete(obj.c_id, obj.o_id);
if (!resp.success) { if (!resp.success) {
// Log errors except (2052 - object already deleted) // Log errors except (2052 - object already deleted)
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error); log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
return; return;
} }

View file

@ -1,13 +1,11 @@
import { sleep } from 'k6'; import datagen from 'k6/x/frostfs/datagen';
import { SharedArray } from 'k6/data';
import logging from 'k6/x/frostfs/logging';
import native from 'k6/x/frostfs/native'; import native from 'k6/x/frostfs/native';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry'; import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats'; import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { newGenerator } from './libs/datagen.js';
import { parseEnv } from './libs/env-parser.js';
import { textSummary } from './libs/k6-summary-0.0.2.js'; import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js'; import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv(); parseEnv();
@ -21,50 +19,35 @@ const container_list = new SharedArray('container_list', function () {
}); });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json'; const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random gRPC endpoint for current VU // Select random gRPC endpoint for current VU
const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(','); const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpc_endpoint = const grpc_endpoint = grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)]; const grpc_client = native.connect(grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5, __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60);
const grpc_client = native.connect( const log = logging.new().withField("endpoint", grpc_endpoint);
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
const log = logging.new().withField('endpoint', grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE; const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION; const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined; const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined; let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) { if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector( obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_delete', __ENV.REGISTRY_FILE,
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, { "obj_to_delete",
status: 'created', __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age, age: delete_age,
}); }
);
} }
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined; const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
}
const scenarios = {}; const scenarios = {};
@ -72,8 +55,6 @@ const time_unit = __ENV.TIME_UNIT || '1s';
const pre_alloc_write_vus = parseInt(__ENV.PRE_ALLOC_WRITERS || '0'); const pre_alloc_write_vus = parseInt(__ENV.PRE_ALLOC_WRITERS || '0');
const max_write_vus = parseInt(__ENV.MAX_WRITERS || pre_alloc_write_vus); const max_write_vus = parseInt(__ENV.MAX_WRITERS || pre_alloc_write_vus);
const write_rate = parseInt(__ENV.WRITE_RATE || '0'); const write_rate = parseInt(__ENV.WRITE_RATE || '0');
const write_grpc_chunk_size = 1024 * parseInt(__ENV.GRPC_CHUNK_SIZE || '0')
const generator = newGenerator(write_rate > 0);
if (write_rate > 0) { if (write_rate > 0) {
scenarios.write = { scenarios.write = {
executor: 'constant-arrival-rate', executor: 'constant-arrival-rate',
@ -108,8 +89,7 @@ const max_delete_vus = parseInt(__ENV.MAX_DELETERS || pre_alloc_write_vus);
const delete_rate = parseInt(__ENV.DELETE_RATE || '0'); const delete_rate = parseInt(__ENV.DELETE_RATE || '0');
if (delete_rate > 0) { if (delete_rate > 0) {
if (!obj_to_delete_selector) { if (!obj_to_delete_selector) {
throw new Error( throw new Error('Positive DELETE worker number without a proper object selector');
'Positive DELETE worker number without a proper object selector');
} }
scenarios.delete = { scenarios.delete = {
@ -130,8 +110,7 @@ export const options = {
}; };
export function setup() { export function setup() {
const total_pre_allocated_vu_count = const total_pre_allocated_vu_count = pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus
console.log(`Pregenerated containers: ${container_list.length}`); console.log(`Pregenerated containers: ${container_list.length}`);
@ -149,19 +128,12 @@ export function setup() {
console.log(`Read rate: ${read_rate}`); console.log(`Read rate: ${read_rate}`);
console.log(`Writing rate: ${write_rate}`); console.log(`Writing rate: ${write_rate}`);
console.log(`Delete rate: ${delete_rate}`); console.log(`Delete rate: ${delete_rate}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
} }
export function teardown(data) { export function teardown(data) {
if (obj_registry) { if (obj_registry) {
obj_registry.close(); obj_registry.close();
} }
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
} }
export function handleSummary(data) { export function handleSummary(data) {
@ -176,20 +148,20 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE); sleep(__ENV.SLEEP_WRITE);
} }
const headers = { unique_header: uuidv4() }; const headers = {
const container = unique_header: uuidv4()
container_list[Math.floor(Math.random() * container_list.length)]; };
const container = container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload(); const { payload, hash } = generator.genPayload(registry_enabled);
const resp = const resp = grpc_client.put(container, headers, payload);
grpc_client.put(container, headers, payload, write_grpc_chunk_size);
if (!resp.success) { if (!resp.success) {
log.withField('cid', container).error(resp.error); log.withField("cid", container).error(resp.error);
return; return;
} }
if (obj_registry) { if (obj_registry) {
obj_registry.addObject(container, resp.object_id, '', '', payload.hash()); obj_registry.addObject(container, resp.object_id, "", "", hash);
} }
} }
@ -198,22 +170,10 @@ export function obj_read() {
sleep(__ENV.SLEEP_READ); sleep(__ENV.SLEEP_READ);
} }
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = grpc_client.get(obj.c_id, obj.o_id)
if (!resp.success) {
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)]; const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object) const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) { if (!resp.success) {
log.withFields({ cid: obj.container, oid: obj.object }).error(resp.error); log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
} }
} }
@ -230,7 +190,7 @@ export function obj_delete() {
const resp = grpc_client.delete(obj.c_id, obj.o_id); const resp = grpc_client.delete(obj.c_id, obj.o_id);
if (!resp.success) { if (!resp.success) {
// Log errors except (2052 - object already deleted) // Log errors except (2052 - object already deleted)
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error); log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
return; return;
} }

View file

@ -1,48 +1,41 @@
import {sleep} from 'k6'; import datagen from 'k6/x/frostfs/datagen';
import {SharedArray} from 'k6/data';
import http from 'k6/http';
import logging from 'k6/x/frostfs/logging'; import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry'; import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats'; import http from 'k6/http';
import { SharedArray } from 'k6/data';
import {newGenerator} from './libs/datagen.js'; import { sleep } from 'k6';
import {parseEnv} from './libs/env-parser.js'; import { textSummary } from './libs/k6-summary-0.0.2.js';
import {textSummary} from './libs/k6-summary-0.0.2.js'; import { parseEnv } from './libs/env-parser.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js'; import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv(); parseEnv();
const obj_list = new SharedArray('obj_list', function() { const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects; return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
}); });
const container_list = new SharedArray('container_list', function() { const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers; return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
}); });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json'; const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random HTTP endpoint for current VU // Select random HTTP endpoint for current VU
const http_endpoints = __ENV.HTTP_ENDPOINTS.split(','); const http_endpoints = __ENV.HTTP_ENDPOINTS.split(',');
const http_endpoint = const http_endpoint = http_endpoints[Math.floor(Math.random() * http_endpoints.length)];
http_endpoints[Math.floor(Math.random() * http_endpoints.length)]; const log = logging.new().withField("endpoint", http_endpoint);
const log = logging.new().withField('endpoint', http_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE; const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION; const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) { const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE), __ENV.PAYLOAD_TYPE || "");
stats.setTags(__ENV.METRIC_TAGS)
}
const scenarios = {}; const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0'); const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) { if (write_vu_count > 0) {
scenarios.write = { scenarios.write = {
executor: 'constant-vus', executor: 'constant-vus',
@ -78,24 +71,17 @@ export function setup() {
console.log(`Reading VUs: ${read_vu_count}`); console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`); console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`); console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
} }
export function teardown(data) { export function teardown(data) {
if (obj_registry) { if (obj_registry) {
obj_registry.close(); obj_registry.close();
} }
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
} }
export function handleSummary(data) { export function handleSummary(data) {
return { return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}), 'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data), [summary_json]: JSON.stringify(data),
}; };
} }
@ -105,16 +91,12 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE); sleep(__ENV.SLEEP_WRITE);
} }
const container = const container = container_list[Math.floor(Math.random() * container_list.length)];
container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload(); const { payload, hash } = generator.genPayload(registry_enabled);
const data = { const data = {
field: uuidv4(), field: uuidv4(),
// Because we use `file` wrapping and it is not straightforward to use
// streams here,
// `-e STREAMING=1` has no effect for this scenario.
file: http.file(payload.bytes(), 'random.data'), file: http.file(payload, "random.data"),
}; };
const resp = http.post(`http://${http_endpoint}/upload/${container}`, data); const resp = http.post(`http://${http_endpoint}/upload/${container}`, data);
@ -124,7 +106,7 @@ export function obj_write() {
} }
const object_id = JSON.parse(resp.body).object_id; const object_id = JSON.parse(resp.body).object_id;
if (obj_registry) { if (obj_registry) {
obj_registry.addObject(container, object_id, '', '', payload.hash()); obj_registry.addObject(container, object_id, "", "", hash);
} }
} }
@ -134,10 +116,8 @@ export function obj_read() {
} }
const obj = obj_list[Math.floor(Math.random() * obj_list.length)]; const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = const resp = http.get(`http://${http_endpoint}/get/${obj.container}/${obj.object}`);
http.get(`http://${http_endpoint}/get/${obj.container}/${obj.object}`);
if (resp.status != 200) { if (resp.status != 200) {
log.withFields({status: resp.status, cid: obj.container, oid: obj.object}) log.withFields({status: resp.status, cid: obj.container, oid: obj.object}).error(resp.error);
.error(resp.error);
} }
} }

View file

@ -1,8 +0,0 @@
import datagen from 'k6/x/frostfs/datagen';
export function newGenerator(condition) {
if (condition) {
return datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE), __ENV.PAYLOAD_TYPE || "", !!__ENV.STREAMING);
}
return undefined;
}

View file

@ -1,34 +0,0 @@
import { uuidv4 } from './k6-utils-1.4.0.js';
export function generateS3Key() {
let width = parseInt(__ENV.DIR_WIDTH || '0');
let height = parseInt(__ENV.DIR_HEIGHT || '0');
let key = ''
if (width > 0 && height > 0) {
for (let index = 0; index < height; index++) {
const w = Math.floor(Math.random() * width) + 1;
key = key + 'dir' + w + '/';
}
}
key += objName();
return key;
}
const asciiLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
function objName() {
if (__ENV.OBJ_NAME) {
return __ENV.OBJ_NAME;
}
const length = parseInt(__ENV.OBJ_NAME_LENGTH || '0');
if (length > 0) {
let name = "";
for (let i = 0; i < length; i++) {
name += asciiLetters.charAt(Math.floor(Math.random() * asciiLetters.length));
}
return name;
}
return uuidv4();
}

View file

@ -1,63 +1,54 @@
import {SharedArray} from 'k6/data'; import datagen from 'k6/x/frostfs/datagen';
import exec from 'k6/execution';
import local from 'k6/x/frostfs/local'; import local from 'k6/x/frostfs/local';
import logging from 'k6/x/frostfs/logging'; import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry'; import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats'; import { SharedArray } from 'k6/data';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import {newGenerator} from './libs/datagen.js'; import { parseEnv } from './libs/env-parser.js';
import {parseEnv} from './libs/env-parser.js'; import { uuidv4 } from './libs/k6-utils-1.4.0.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
parseEnv(); parseEnv();
const obj_list = new SharedArray('obj_list', function() { const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects; return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
}); });
const container_list = new SharedArray('container_list', function() { const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers; return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
}); });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json'; const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
const config_file = __ENV.CONFIG_FILE; const config_file = __ENV.CONFIG_FILE;
const config_dir = __ENV.CONFIG_DIR;
const debug_logger = (__ENV.DEBUG_LOGGER || 'false') == 'true'; const debug_logger = (__ENV.DEBUG_LOGGER || 'false') == 'true';
const max_total_size_gb = const local_client = local.connect(config_file, '', debug_logger);
__ENV.MAX_TOTAL_SIZE_GB ? parseInt(__ENV.MAX_TOTAL_SIZE_GB) : 0; const log = logging.new().withField("config", config_file);
const local_client =
local.connect(config_file, config_dir, '', debug_logger, max_total_size_gb);
const log = logging.new().withFields(
{'config_file': config_file, 'config_dir': config_dir});
const registry_enabled = !!__ENV.REGISTRY_FILE; const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION; const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined; const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined; let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) { if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector( obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_delete', __ENV.REGISTRY_FILE,
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, { "obj_to_delete",
status: 'created', __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age, age: delete_age,
}); }
);
} }
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));
const scenarios = {}; const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0'); const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) { if (write_vu_count > 0) {
scenarios.write = { scenarios.write = {
executor: 'constant-vus', executor: 'constant-vus',
@ -82,8 +73,7 @@ if (read_vu_count > 0) {
const delete_vu_count = parseInt(__ENV.DELETERS || '0'); const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) { if (delete_vu_count > 0) {
if (!obj_to_delete_selector) { if (!obj_to_delete_selector) {
throw new Error( throw new Error('Positive DELETE worker number without a proper object selector');
'Positive DELETE worker number without a proper object selector');
} }
scenarios.delete = { scenarios.delete = {
@ -110,45 +100,36 @@ export function setup() {
console.log(`Writing VUs: ${write_vu_count}`); console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`); console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`); console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
} }
export function teardown(data) { export function teardown(data) {
if (obj_registry) { if (obj_registry) {
obj_registry.close(); obj_registry.close();
} }
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
} }
export function handleSummary(data) { export function handleSummary(data) {
return { return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}), 'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data), [summary_json]: JSON.stringify(data),
}; };
} }
export function obj_write() { export function obj_write() {
const headers = {unique_header: uuidv4()}; const headers = {
const container = unique_header: uuidv4()
container_list[Math.floor(Math.random() * container_list.length)]; };
const container = container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload(); const { payload, hash } = generator.genPayload(registry_enabled);
const resp = local_client.put(container, headers, payload); const resp = local_client.put(container, headers, payload);
if (!resp.success) { if (!resp.success) {
if (resp.abort) { log.withField("cid", container).error(resp.error);
exec.test.abort(resp.error);
}
log.withField('cid', container).error(resp.error);
return; return;
} }
if (obj_registry) { if (obj_registry) {
obj_registry.addObject(container, resp.object_id, '', '', payload.hash()); obj_registry.addObject(container, resp.object_id, "", "", hash);
} }
} }

View file

@ -1,54 +1,45 @@
import uuid import uuid
from helpers.cmd import execute_cmd, log from helpers.cmd import execute_cmd
def create_bucket(endpoint, versioning, location, acl, no_verify_ssl): def create_bucket(endpoint, versioning, location):
configuration = ""
if location: if location:
configuration = f"--create-bucket-configuration 'LocationConstraint={location}'" location = f"--create-bucket-configuration 'LocationConstraint={location}'"
if acl:
acl = f"--acl {acl}"
bucket_name = str(uuid.uuid4()) bucket_name = str(uuid.uuid4())
no_verify_ssl_str = "--no-verify-ssl" if no_verify_ssl else ""
cmd_line = f"aws {no_verify_ssl_str} s3api create-bucket --bucket {bucket_name} " \
f"--endpoint {endpoint} {configuration} {acl} "
cmd_line_ver = f"aws {no_verify_ssl_str} s3api put-bucket-versioning --bucket {bucket_name} " \
f"--versioning-configuration Status=Enabled --endpoint {endpoint}"
output, success = execute_cmd(cmd_line) cmd_line = f"aws --no-verify-ssl s3api create-bucket --bucket {bucket_name} " \
f"--endpoint http://{endpoint} {location}"
cmd_line_ver = f"aws --no-verify-ssl s3api put-bucket-versioning --bucket {bucket_name} " \
f"--versioning-configuration Status=Enabled --endpoint http://{endpoint} "
if not success and "succeeded and you already own it" not in output: out, success = execute_cmd(cmd_line)
log(f"{cmd_line}\n"
f"Bucket {bucket_name} has not been created:\n" if not success and "succeeded and you already own it" not in out:
f"Error: {output}", endpoint) print(f" > Bucket {bucket_name} has not been created:\n{out}")
return False return False
if versioning: print(f"cmd: {cmd_line}")
output, success = execute_cmd(cmd_line_ver)
if not success: if versioning == "True":
log(f"{cmd_line_ver}\n" out, success = execute_cmd(cmd_line_ver)
f"Bucket versioning has not been applied for bucket {bucket_name}\n" if not success:
f"Error: {output}", endpoint) print(f" > Bucket versioning has not been applied for bucket {bucket_name}:\n{out}")
else: else:
log(f"Bucket versioning has been applied for bucket {bucket_name}", endpoint) print(f" > Bucket versioning has been applied.")
log(f"Created bucket: {bucket_name} ({location})", endpoint)
return bucket_name return bucket_name
def upload_object(bucket, payload_filepath, endpoint, no_verify_ssl): def upload_object(bucket, payload_filepath, endpoint):
object_name = str(uuid.uuid4()) object_name = str(uuid.uuid4())
no_verify_ssl_str = "--no-verify-ssl" if no_verify_ssl else ""
cmd_line = f"aws {no_verify_ssl_str} s3api put-object --bucket {bucket} --key {object_name} " \ cmd_line = f"aws --no-verify-ssl s3api put-object --bucket {bucket} --key {object_name} " \
f"--body {payload_filepath} --endpoint {endpoint}" f"--body {payload_filepath} --endpoint http://{endpoint}"
output, success = execute_cmd(cmd_line) out, success = execute_cmd(cmd_line)
if not success: if not success:
log(f"{cmd_line}\n" print(f" > Object {object_name} has not been uploaded.")
f"Object {object_name} has not been uploaded\n"
f"Error: {output}", endpoint)
return False return False
else:
return bucket, endpoint, object_name return object_name

View file

@ -1,12 +1,9 @@
import os import os
import shlex import shlex
import sys import sys
from datetime import datetime
from subprocess import check_output, CalledProcessError, STDOUT from subprocess import check_output, CalledProcessError, STDOUT
def log(message, endpoint):
time = datetime.utcnow()
print(f"{time} at {endpoint}: {message}")
def execute_cmd(cmd_line): def execute_cmd(cmd_line):
cmd_args = shlex.split(cmd_line) cmd_args = shlex.split(cmd_line)

View file

@ -1,175 +1,81 @@
import re import re
from helpers.cmd import execute_cmd, log
def create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local=False, retry=0): from helpers.cmd import execute_cmd
if retry > int(container_creation_retry):
raise ValueError(f"unable to create container: too many unsuccessful attempts")
if wallet_path:
wallet_file = f"--wallet {wallet_path}" def create_container(endpoint, policy, wallet_file, wallet_config):
if config: cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} container create --wallet {wallet_file} --config {wallet_config} " \
wallet_config = f"--config {config}" f" --policy '{policy}' --basic-acl public-read-write --await"
cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} container create {wallet_file} {wallet_config} " \
f" --policy '{policy}' --await"
output, success = execute_cmd(cmd_line) output, success = execute_cmd(cmd_line)
if not success: if not success:
log(f"{cmd_line}\n" print(f" > Container has not been created:\n{output}")
f"Container has not been created\n"
f"{output}", endpoint)
return False return False
else:
try: try:
fst_str = output.split('\n')[0] fst_str = output.split('\n')[0]
except Exception: except Exception:
log(f"{cmd_line}\n" print(f"Got empty output: {output}")
f"Incorrect output\n"
f"Output: {output or '<empty>'}", endpoint)
return False return False
splitted = fst_str.split(": ") splitted = fst_str.split(": ")
if len(splitted) != 2: if len(splitted) != 2:
raise ValueError(f"no CID was parsed from command output:\t{fst_str}") raise ValueError(f"no CID was parsed from command output: \t{fst_str}")
cid = splitted[1]
log(f"Created container: {cid} ({policy})", endpoint) print(f"Created container: {splitted[1]}")
# Add rule for container return splitted[1]
if rules:
r = ""
for rule in rules:
r += f" --rule '{rule}' "
cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} ape-manager add {wallet_file} {wallet_config} " \
f" --chain-id 'chain-id' {r} --target-name '{cid}' --target-type 'container'"
output, success = execute_cmd(cmd_line)
if not success:
log(f"{cmd_line}\n"
f"Rule has not been added\n"
f"{output}", endpoint)
return False
if not local:
return cid
cmd_line = f"frostfs-cli netmap nodeinfo --rpc-endpoint {endpoint} {wallet_file} {wallet_config}"
output, success = execute_cmd(cmd_line)
if not success:
log(f"{cmd_line}\n"
f"Failed to get nodeinfo\n"
f"{output}", endpoint)
return False
try:
fst_str = output.split('\n')[0]
except Exception:
log(f"{cmd_line}\n"
f"Incorrect output\n"
f"Output: {output or '<empty>'}", endpoint)
return False
splitted = fst_str.split(": ")
if len(splitted) != 2 or len(splitted[1]) == 0:
raise ValueError(f"no node key was parsed from command output:\t{fst_str}")
node_key = splitted[1]
cmd_line = f"frostfs-cli container nodes --rpc-endpoint {endpoint} {wallet_file} {wallet_config} --cid {cid}"
output, success = execute_cmd(cmd_line)
if not success:
log(f"{cmd_line}\n"
f"Failed to get container nodes\n"
f"{output}", endpoint)
return False
for output_str in output.split('\n'):
output_str = output_str.lstrip().rstrip()
if not output_str.startswith("Node "):
continue
splitted = output_str.split(": ")
if len(splitted) != 2 or len(splitted[1]) == 0:
continue
try:
k = splitted[1].split(" ")[0]
except Exception:
log(f"{cmd_line}\n"
f"Incorrect output\n"
f"Output: {output or '<empty>'}", endpoint)
continue
if k == node_key:
return cid
log(f"Created container {cid} is not stored on {endpoint}, creating another one...", endpoint)
return create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local, retry + 1)
def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config): def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config):
object_name = "" object_name = ""
if wallet_file: cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} object put --file {payload_filepath} --wallet {wallet_file} --config {wallet_config} " \
wallet_file = "--wallet " + wallet_file
if wallet_config:
wallet_config = "--config " + wallet_config
cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} object put --file {payload_filepath} {wallet_file} {wallet_config} " \
f"--cid {container} --no-progress" f"--cid {container} --no-progress"
output, success = execute_cmd(cmd_line) output, success = execute_cmd(cmd_line)
if not success: if not success:
log(f"{cmd_line}\n" print(f" > Object {object_name} has not been uploaded:\n{output}")
f"Object {object_name} has not been uploaded\n"
f"Error: {output}", endpoint)
return False return False
else:
try: try:
# taking second string from command output # taking second string from command output
snd_str = output.split('\n')[1] snd_str = output.split('\n')[1]
except Exception: except Exception:
log(f"{cmd_line}\n" print(f"Got empty input: {output}")
f"Incorrect output\n"
f"Output: {output or '<empty>'}", endpoint)
return False return False
splitted = snd_str.split(": ") splitted = snd_str.split(": ")
if len(splitted) != 2: if len(splitted) != 2:
raise Exception(f"no OID was parsed from command output: \t{snd_str}") raise Exception(f"no OID was parsed from command output: \t{snd_str}")
return container, endpoint, splitted[1] return splitted[1]
def get_object(cid, oid, endpoint, out_filepath, wallet_file, wallet_config): def get_object(cid, oid, endpoint, out_filepath, wallet_file, wallet_config):
if wallet_file: cmd_line = f"frostfs-cli object get -r {endpoint} --cid {cid} --oid {oid} --wallet {wallet_file} --config {wallet_config} " \
wallet_file = "--wallet " + wallet_file
if wallet_config:
wallet_config = "--config " + wallet_config
cmd_line = f"frostfs-cli object get -r {endpoint} --cid {cid} --oid {oid} {wallet_file} {wallet_config} " \
f"--file {out_filepath}" f"--file {out_filepath}"
output, success = execute_cmd(cmd_line) output, success = execute_cmd(cmd_line)
if not success: if not success:
log(f"{cmd_line}\n" print(f" > Failed to get object {output} from container {cid} \r\n"
f"Failed to get object {oid} from container {cid}\n" f" > Error: {output}")
f"Error: {output}", endpoint)
return False return False
return True return True
def search_object_by_id(cid, oid, endpoint, wallet_file, wallet_config, ttl=2): def search_object_by_id(cid, oid, endpoint, wallet_file, wallet_config, ttl=2):
if wallet_file: cmd_line = f"frostfs-cli object search --ttl {ttl} -r {endpoint} --cid {cid} --oid {oid} --wallet {wallet_file} --config {wallet_config} "
wallet_file = "--wallet " + wallet_file
if wallet_config:
wallet_config = "--config " + wallet_config
cmd_line = f"frostfs-cli object search --ttl {ttl} -r {endpoint} --cid {cid} --oid {oid} {wallet_file} {wallet_config} "
output, success = execute_cmd(cmd_line) output, success = execute_cmd(cmd_line)
if not success: if not success:
log(f"{cmd_line}\n" print(f" > Failed to search object {oid} for container {cid} \r\n"
f"Failed to search object {oid} for container {cid}\n" f" > Error: {output}")
f"Error: {output}", endpoint)
return False return False
re_rst = re.search(r'Found (\d+) objects', output) re_rst = re.search(r'Found (\d+) objects', output)
if not re_rst: if not re_rst:
raise Exception("Failed to parse search results") raise Exception("Failed to parce search results")
return re_rst.group(1) return re_rst.group(1)
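
Taken together, these helpers compose into a small end-to-end check. A minimal sketch, assuming the module layout above, a reachable storage node, and valid wallet files; the endpoint, file paths, and policy below are illustrative, not part of the preset code:

```python
# Hypothetical smoke test built on the helpers above; adjust the endpoint,
# wallet paths and policy to your environment.
from helpers.frostfs_cli import create_container, get_object, upload_object

ENDPOINT = "127.0.0.1:8080"                # assumed node address
WALLET, CONFIG = "wallet.json", "cfg.yml"  # assumed wallet files

cid = create_container(ENDPOINT, "REP 2 IN X CBF 2 SELECT 2 FROM * AS X",
                       container_creation_retry=20, wallet_path=WALLET,
                       config=CONFIG, rules=["allow Object.* *"], local=False)
if cid:
    result = upload_object(cid, "payload.bin", ENDPOINT, WALLET, CONFIG)
    if result:
        _, _, oid = result  # upload_object returns (container, endpoint, oid)
        get_object(cid, oid, ENDPOINT, "downloaded.bin", WALLET, CONFIG)
```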

View file

@ -1,136 +1,110 @@
#!/usr/bin/python3 #!/usr/bin/python3
import argparse import argparse
from itertools import cycle
import json import json
import random
import sys import sys
import tempfile import tempfile
import time
from argparse import Namespace from argparse import Namespace
from concurrent.futures import ProcessPoolExecutor from concurrent.futures import ProcessPoolExecutor
from helpers.cmd import random_payload from helpers.cmd import random_payload
from helpers.frostfs_cli import create_container, upload_object from helpers.frostfs_cli import create_container, upload_object
ERROR_WRONG_CONTAINERS_COUNT = 1 ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2 ERROR_WRONG_OBJECTS_COUNT = 2
MAX_WORKERS = 50 MAX_WORKERS = 50
DEFAULT_POLICY = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
DEFAULT_RULES = ["allow Object.* *"]
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb') parser.add_argument('--size', help='Upload objects size in kb')
parser.add_argument('--containers', help='Number of containers to create') parser.add_argument('--containers', help='Number of containers to create')
parser.add_argument('--retry', default=20, help='Maximum number of retries to create a container')
parser.add_argument('--out', help='JSON file with output') parser.add_argument('--out', help='JSON file with output')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects') parser.add_argument('--preload_obj', help='Number of pre-loaded objects')
parser.add_argument('--wallet', help='Wallet file path') parser.add_argument('--wallet', help='Wallet file path')
parser.add_argument('--config', help='Wallet config file path') parser.add_argument('--config', help='Wallet config file path')
parser.add_argument( parser.add_argument(
"--policy", "--policy",
help=f"Container placement policy. Default is {DEFAULT_POLICY}", help="Container placement policy",
action="append" default="REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
) )
parser.add_argument('--endpoint', help='Node addresses separated by comma.') parser.add_argument('--endpoint', help='Node address')
parser.add_argument('--update', help='Save existing containers') parser.add_argument('--update', help='Save existing containers')
parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true') parser.add_argument('--ignore-errors', help='Ignore preset errors')
parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50) parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
parser.add_argument('--sleep', help='Time to sleep between container creation and object upload (in seconds), '
'Default = 8', default=8)
parser.add_argument('--local', help='Create containers that store data on provided endpoints. Warning: additional empty containers may be created.', action='store_true')
parser.add_argument(
'--rule',
help='Rule attached to created containers. All entries of CONTAINER_ID will be replaced with the ID of the created container.',
action="append")
args: Namespace = parser.parse_args() args: Namespace = parser.parse_args()
print(args) print(args)
def main(): def main():
containers = [] container_list = []
objects_list = [] objects_list = []
endpoints = args.endpoint.split(',') endpoints = args.endpoint.split(',')
if not args.policy:
args.policy = [DEFAULT_POLICY]
container_creation_retry = args.retry
wallet = args.wallet wallet = args.wallet
wallet_config = args.config wallet_config = args.config
workers = int(args.workers) workers = int(args.workers)
objects_per_container = int(args.preload_obj) objects_per_container = int(args.preload_obj)
rules = args.rule
if not rules:
rules = DEFAULT_RULES
ignore_errors = args.ignore_errors ignore_errors = True if args.ignore_errors else False
if args.update: if args.update:
# Open file # Open file
with open(args.out) as f: with open(args.out) as f:
data_json = json.load(f) data_json = json.load(f)
containers = data_json['containers'] container_list = data_json['containers']
containers_count = len(containers) containers_count = len(container_list)
else: else:
containers_count = int(args.containers) containers_count = int(args.containers)
print(f"Create containers: {containers_count}") print(f"Create containers: {containers_count}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor: with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, rules, args.local) containers_runs = {executor.submit(create_container, endpoints[random.randrange(len(endpoints))],
for _, endpoint, policy in args.policy, wallet, wallet_config): _ for _ in range(containers_count)}
zip(range(containers_count), cycle(endpoints), cycle(args.policy))]
for run in containers_runs: for run in containers_runs:
container_id = run.result() if run.result():
if container_id: container_list.append(run.result())
containers.append(container_id)
print("Create containers: Completed") print("Create containers: Completed")
print(f" > Containers: {containers}") print(f" > Containers: {container_list}")
if containers_count > 0 and len(containers) != containers_count: if containers_count == 0 or len(container_list) != containers_count:
print(f"Containers mismatch in preset: expected {containers_count}, created {len(containers)}") print(f"Containers mismatch in preset: expected {containers_count}, created {len(container_list)}")
if not ignore_errors: if not ignore_errors:
sys.exit(ERROR_WRONG_CONTAINERS_COUNT) sys.exit(ERROR_WRONG_CONTAINERS_COUNT)
if args.sleep != 0:
print(f"Sleep for {args.sleep} seconds")
time.sleep(args.sleep)
print(f"Upload objects to each container: {args.preload_obj} ") print(f"Upload objects to each container: {args.preload_obj} ")
payload_file = tempfile.NamedTemporaryFile() payload_file = tempfile.NamedTemporaryFile()
random_payload(payload_file, args.size) random_payload(payload_file, args.size)
print(" > Create random payload: Completed") print(" > Create random payload: Completed")
total_objects = objects_per_container * containers_count for container in container_list:
print(f" > Upload objects for container {container}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor: with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
objects_runs = [executor.submit(upload_object, container, payload_file.name, objects_runs = {executor.submit(upload_object, container, payload_file.name,
endpoint, wallet, wallet_config) endpoints[random.randrange(len(endpoints))], wallet, wallet_config): _ for _ in range(objects_per_container)}
for _, container, endpoint in
zip(range(total_objects), cycle(containers), cycle(endpoints))]
for run in objects_runs: for run in objects_runs:
result = run.result() if run.result():
if result: objects_list.append({'container': container, 'object': run.result()})
container_id = result[0] print(f" > Upload objects for container {container}: Completed")
endpoint = result[1]
object_id = result[2]
objects_list.append({'container': container_id, 'object': object_id})
print(f" > Uploaded object {object_id} for container {container_id} via endpoint {endpoint}.")
print("Upload objects to each container: Completed") print("Upload objects to each container: Completed")
total_objects = objects_per_container * containers_count
if total_objects > 0 and len(objects_list) != total_objects: if total_objects > 0 and len(objects_list) != total_objects:
print(f"Objects mismatch in preset: expected {total_objects}, created {len(objects_list)}") print(f"Objects mismatch in preset: expected {total_objects}, created {len(objects_list)}")
if not ignore_errors: if not ignore_errors:
sys.exit(ERROR_WRONG_OBJECTS_COUNT) sys.exit(ERROR_WRONG_OBJECTS_COUNT)
data = {'containers': containers, 'objects': objects_list, 'obj_size': args.size + " Kb"} data = {'containers': container_list, 'objects': objects_list, 'obj_size': args.size + " Kb"}
with open(args.out, 'w+') as f: with open(args.out, 'w+') as f:
json.dump(data, f, ensure_ascii=False, indent=2) json.dump(data, f, ensure_ascii=False, indent=2)
print("Result:") print("Result:")
print(f" > Total Containers has been created: {len(containers)}.") print(f" > Total Containers has been created: {len(container_list)}.")
print(f" > Total Objects has been created: {len(objects_list)}.") print(f" > Total Objects has been created: {len(objects_list)}.")

View file

@ -1,53 +1,39 @@
#!/usr/bin/python3 #!/usr/bin/python3
import argparse import argparse
from itertools import cycle
import json import json
import sys import sys
import tempfile import tempfile
import time
from concurrent.futures import ProcessPoolExecutor from concurrent.futures import ProcessPoolExecutor
from helpers.cmd import random_payload from helpers.cmd import random_payload
from helpers.aws_cli import create_bucket, upload_object from helpers.aws_cli import create_bucket, upload_object
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
ERROR_WRONG_PERCENTAGE = 3
MAX_WORKERS = 50
DEFAULT_LOCATION = ""
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb.') parser.add_argument('--size', help='Upload objects size in kb.')
parser.add_argument('--buckets', help='Number of buckets to create.') parser.add_argument('--buckets', help='Number of buckets to create.')
parser.add_argument('--out', help='JSON file with output.') parser.add_argument('--out', help='JSON file with output.')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects.') parser.add_argument('--preload_obj', help='Number of pre-loaded objects.')
parser.add_argument('--endpoint', help='S3 Gateway addresses separated by comma.') parser.add_argument('--endpoint', help='S3 Gateway address.')
parser.add_argument('--update', help='True/False, False by default. Save existing buckets from target file (--out). ' parser.add_argument('--update', help='True/False, False by default. Save existing buckets from target file (--out). '
'New buckets will not be created.') 'New buckets will not be created.')
parser.add_argument('--location', help=f'AWS location constraint. Default is "{DEFAULT_LOCATION}"', action="append") parser.add_argument('--location', help='AWS location. Will be empty, if has not be declared.', default="")
parser.add_argument('--versioning', help='True/False, False by default. Alias of --buckets_versioned=100') parser.add_argument('--versioning', help='True/False, False by default.')
parser.add_argument('--buckets_versioned', help='Percent of versioned buckets. Default is 0', default=0) parser.add_argument('--ignore-errors', help='Ignore preset errors')
parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50) parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
parser.add_argument('--sleep', help='Time to sleep between bucket creation and object upload (in seconds), '
'Default = 8', default=8)
parser.add_argument('--acl', help='Bucket ACL. Default is private. Expected values are: private, public-read or public-read-write.', default="private")
args = parser.parse_args() args = parser.parse_args()
print(args) print(args)
def main(): ERROR_WRONG_CONTAINERS_COUNT = 1
buckets = [] ERROR_WRONG_OBJECTS_COUNT = 2
objects_list = [] MAX_WORKERS = 50
ignore_errors = args.ignore_errors
no_verify_ssl = args.no_verify_ssl
endpoints = args.endpoint.split(',') def main():
if not args.location: bucket_list = []
args.location = [DEFAULT_LOCATION] objects_list = []
ignore_errors = True if args.ignore_errors else False
workers = int(args.workers) workers = int(args.workers)
objects_per_bucket = int(args.preload_obj) objects_per_bucket = int(args.preload_obj)
@ -56,77 +42,60 @@ def main():
# Open file # Open file
with open(args.out) as f: with open(args.out) as f:
data_json = json.load(f) data_json = json.load(f)
buckets = data_json['buckets'] bucket_list = data_json['buckets']
buckets_count = len(buckets) buckets_count = len(bucket_list)
# Get CID list # Get CID list
else: else:
buckets_count = int(args.buckets) buckets_count = int(args.buckets)
print(f"Create buckets: {buckets_count}") print(f"Create buckets: {buckets_count}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor: with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
if not 0 <= int(args.buckets_versioned) <= 100: buckets_runs = {executor.submit(create_bucket, args.endpoint, args.versioning,
print(f"Percent of versioned buckets must be between 0 and 100: got {args.buckets_versioned}") args.location): _ for _ in range(buckets_count)}
if not ignore_errors:
sys.exit(ERROR_WRONG_PERCENTAGE)
if args.versioning == "True":
versioning_per_bucket = [True] * buckets_count
else:
num_versioned_buckets = int((int(args.buckets_versioned) / 100) * buckets_count)
versioning_per_bucket = [True] * num_versioned_buckets + [False] * (buckets_count - num_versioned_buckets)
buckets_runs = [executor.submit(create_bucket, endpoint, versioning_per_bucket[i], location, args.acl, no_verify_ssl)
for i, endpoint, location in
zip(range(buckets_count), cycle(endpoints), cycle(args.location))]
for run in buckets_runs: for run in buckets_runs:
bucket_name = run.result() if run.result():
if bucket_name: bucket_list.append(run.result())
buckets.append(bucket_name)
print("Create buckets: Completed") print("Create buckets: Completed")
print(f" > Buckets: {buckets}") print(f" > Buckets: {bucket_list}")
if buckets_count > 0 and len(buckets) != buckets_count: if buckets_count == 0 or len(bucket_list) != buckets_count:
print(f"Buckets mismatch in preset: expected {buckets_count}, created {len(buckets)}") print(f"Buckets mismatch in preset: expected {buckets_count}, created {len(bucket_list)}")
if not ignore_errors: if not ignore_errors:
sys.exit(ERROR_WRONG_CONTAINERS_COUNT) sys.exit(ERROR_WRONG_CONTAINERS_COUNT)
if args.sleep != 0:
print(f"Sleep for {args.sleep} seconds")
time.sleep(args.sleep)
print(f"Upload objects to each bucket: {objects_per_bucket} ") print(f"Upload objects to each bucket: {objects_per_bucket} ")
payload_file = tempfile.NamedTemporaryFile() payload_file = tempfile.NamedTemporaryFile()
random_payload(payload_file, args.size) random_payload(payload_file, args.size)
print(" > Create random payload: Completed") print(" > Create random payload: Completed")
total_objects = objects_per_bucket * buckets_count for bucket in bucket_list:
print(f" > Upload objects for bucket {bucket}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor: with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
objects_runs = [executor.submit(upload_object, bucket, payload_file.name, endpoint, no_verify_ssl) objects_runs = {executor.submit(upload_object, bucket, payload_file.name,
for _, bucket, endpoint in args.endpoint): _ for _ in range(objects_per_bucket)}
zip(range(total_objects), cycle(buckets), cycle(endpoints))]
for run in objects_runs: for run in objects_runs:
result = run.result() if run.result():
if result: objects_list.append({'bucket': bucket, 'object': run.result()})
bucket = result[0] print(f" > Upload objects for bucket {bucket}: Completed")
endpoint = result[1]
object_id = result[2]
objects_list.append({'bucket': bucket, 'object': object_id})
print(f" > Uploaded object {object_id} for bucket {bucket} via endpoint {endpoint}.")
print("Upload objects to each bucket: Completed")
total_objects = objects_per_bucket * buckets_count
if total_objects > 0 and len(objects_list) != total_objects: if total_objects > 0 and len(objects_list) != total_objects:
print(f"Objects mismatch in preset: expected {total_objects}, created {len(objects_list)}") print(f"Objects mismatch in preset: expected {total_objects}, created {len(objects_list)}")
if not ignore_errors: if not ignore_errors:
sys.exit(ERROR_WRONG_OBJECTS_COUNT) sys.exit(ERROR_WRONG_OBJECTS_COUNT)
data = {'buckets': buckets, 'objects': objects_list, 'obj_size': args.size + " Kb"} data = {'buckets': bucket_list, 'objects': objects_list, 'obj_size': args.size + " Kb"}
with open(args.out, 'w+') as f: with open(args.out, 'w+') as f:
json.dump(data, f, ensure_ascii=False, indent=2) json.dump(data, f, ensure_ascii=False, indent=2)
print("Result:") print("Result:")
print(f" > Total Buckets has been created: {len(buckets)}.") print(f" > Total Buckets has been created: {len(bucket_list)}.")
print(f" > Total Objects has been created: {len(objects_list)}.") print(f" > Total Objects has been created: {len(objects_list)}.")

View file

@ -2,8 +2,7 @@
import argparse import argparse
import json import json
import http.client import requests
import ssl
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument('--endpoint', help='Endpoint of the S3 gateway') parser.add_argument('--endpoint', help='Endpoint of the S3 gateway')
@ -17,13 +16,10 @@ def main():
preset = json.loads(preset_text) preset = json.loads(preset_text)
conn = http.client.HTTPSConnection(args.endpoint, context = ssl._create_unverified_context())
containers = [] containers = []
for bucket in preset.get('buckets'): for bucket in preset.get('buckets'):
conn.request("HEAD", f'/{bucket}') resp = requests.head(f'{args.endpoint}/{bucket}', verify=False)
response = conn.getresponse() containers.append(resp.headers['X-Container-Id'])
containers.append(response.getheader('X-Container-Id'))
response.read()
preset['containers'] = containers preset['containers'] = containers
with open(args.preset_file, 'w+') as f: with open(args.preset_file, 'w+') as f:

View file

@ -19,8 +19,6 @@ Scenarios `grpc.js`, `local.js`, `http.js` and `s3.js` support the following opt
* `SLEEP_READ` - time interval (in seconds) between reading VU iterations. * `SLEEP_READ` - time interval (in seconds) between reading VU iterations.
* `SELECTION_SIZE` - size of batch to select for deletion (default: 1000). * `SELECTION_SIZE` - size of batch to select for deletion (default: 1000).
* `PAYLOAD_TYPE` - type of an object payload ("random" or "text", default: "random"). * `PAYLOAD_TYPE` - type of an object payload ("random" or "text", default: "random").
* `STREAMING` - if set, the payload is generated on the fly and is not read into memory fully.
* `METRIC_TAGS` - custom metrics tags (format `tag1:value1;tag2:value2`).
Additionally, the profiling extension can be enabled to generate CPU and memory profiles which can be inspected with `go tool pprof file.prof`: Additionally, the profiling extension can be enabled to generate CPU and memory profiles which can be inspected with `go tool pprof file.prof`:
```shell ```shell
@ -71,15 +69,13 @@ $ ./scenarios/preset/preset_grpc.py --size 1024 --containers 1 --out grpc.json -
2. Execute scenario with options: 2. Execute scenario with options:
```shell ```shell
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e DELETERS=30 -e DELETE_AGE=10 -e REGISTRY_FILE=registry.bolt -e CONFIG_FILE=/path/to/config.yaml -e CONFIG_DIR=/path/to/dir/ -e PREGEN_JSON=./grpc.json scenarios/local.js $ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e DELETERS=30 -e DELETE_AGE=10 -e REGISTRY_FILE=registry.bolt -e CONFIG_FILE=/path/to/config.yaml -e PREGEN_JSON=./grpc.json scenarios/local.js
``` ```
Options (in addition to the common options): Options (in addition to the common options):
* `CONFIG_FILE` - path to the local configuration file used for the storage node. Only the storage configuration section is used. * `CONFIG_FILE` - path to the local configuration file used for the storage node. Only the storage configuration section is used.
* `CONFIG_DIR` - path to the folder with local configuration files used for the storage node.
* `DELETERS` - number of VUs performing delete operations (using deleters requires that options `DELETE_AGE` and `REGISTRY_FILE` are specified as well). * `DELETERS` - number of VUs performing delete operations (using deleters requires that options `DELETE_AGE` and `REGISTRY_FILE` are specified as well).
* `DELETE_AGE` - minimum age of an object (in seconds) before it can be deleted. This parameter can be used to control how many objects are kept in the system under load. * `DELETE_AGE` - minimum age of an object (in seconds) before it can be deleted. This parameter can be used to control how many objects are kept in the system under load.
* `MAX_TOTAL_SIZE_GB` - if specified, the maximum total payload size (in GB) for the storage engine. Once the storage engine is full, no new objects will be saved.
## HTTP ## HTTP
@ -125,7 +121,7 @@ The tests will use all pre-created buckets for PUT operations and all pre-create
$ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 500 --location load-1-4 $ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 500 --location load-1-4
``` ```
* '--location' - specify the name of the container policy (from the policy.json file). It's important to run 'aws configure' each time the policy file changes to pick up the latest policies. * '--location' - specify the name of the container policy (from the policy.json file). It's important to run 'aws configure' each time the policy file changes to pick up the latest policies.
* '--buckets_versioned' - specify the percentage of versioned buckets from the total number of created buckets. Default is 0
3. Execute scenario with options: 3. Execute scenario with options:
```shell ```shell
@ -138,33 +134,6 @@ Options (in addition to the common options):
* `DELETE_AGE` - minimum age of an object (in seconds) before it can be deleted. This parameter can be used to control how many objects are kept in the system under load. * `DELETE_AGE` - minimum age of an object (in seconds) before it can be deleted. This parameter can be used to control how many objects are kept in the system under load.
* `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations. * `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations.
* `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation. * `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
* `OBJ_NAME_LENGTH` - if specified, object names will be generated as random ASCII strings of the given length.
* `DIR_HEIGHT`, `DIR_WIDTH` - if both are specified, the object name will be nested under `DIR_HEIGHT` directory levels, each level chosen from `DIR_WIDTH` possible subdirectories; for example, with `DIR_HEIGHT = 3, DIR_WIDTH = 100`, object names look like `/dir{1...100}/dir{1...100}/dir{1...100}/{uuid || OBJ_NAME}` (see the sketch below).
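
A sketch of how such nested keys could be generated; the scenarios implement this internally, so this snippet only illustrates the resulting key shape:

```python
# Illustration only: reproduce the /dirX/dirY/dirZ/<uuid> key shape
# produced by DIR_HEIGHT/DIR_WIDTH.
import random
import uuid

def gen_key(height=3, width=100, obj_name=None):
    dirs = "".join(f"/dir{random.randint(1, width)}" for _ in range(height))
    return f"{dirs}/{obj_name or uuid.uuid4()}"

print(gen_key())  # e.g. /dir42/dir7/dir99/03b2c1e0-...
```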
## S3 Multipart
Performs multipart uploads: large objects are broken up into parts and transferred in parallel.
```shell
$ ./k6 run -e DURATION=600 \
-e WRITERS=400 -e WRITERS_MULTIPART=10 \
-e WRITE_OBJ_SIZE=524288 -e WRITE_OBJ_PART_SIZE=10240 \
-e S3_ENDPOINTS=10.78.70.142:8084,10.78.70.143:8084,10.78.70.144:8084,10.78.70.145:8084 \
-e PREGEN_JSON=/home/service/s3_4kb.json \
scenarios/s3_multipart.js
```
Options:
* `DURATION` - duration of scenario in seconds.
* `REGISTRY_FILE` - if set, all produced objects will be stored in a database for subsequent verification. The database file name is set to the value of `REGISTRY_FILE`.
* `PREGEN_JSON` - path to json file with pre-generated containers.
* `SLEEP_WRITE` - time interval (in seconds) between writing VU iterations.
* `PAYLOAD_TYPE` - type of an object payload ("random" or "text", default: "random").
* `S3_ENDPOINTS` - endpoints of S3 gateways in the format `host:port`. To specify multiple endpoints, separate them with commas.
* `WRITERS` - number of VUs performing payload upload operations.
* `WRITERS_MULTIPART` - number of goroutines that will upload parts in parallel
* `WRITE_OBJ_SIZE` - object size in kb for write (PUT) operations.
* `WRITE_OBJ_PART_SIZE` - part size in kb for multipart upload operations (must be greater than or equal to 5 MB; see the arithmetic sketch below).
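
As a sanity check on the example above: with `WRITE_OBJ_SIZE=524288` (a 512 MB object) and `WRITE_OBJ_PART_SIZE=10240` (10 MB parts), each object is uploaded in 52 parts. A sketch of the arithmetic:

```python
# Part-count arithmetic for the multipart example above.
import math

write_obj_size_kb = 524288  # 512 MB object
part_size_kb = 10240        # 10 MB parts; the 5 MB (5120 kb) minimum is satisfied

assert part_size_kb >= 5 * 1024
print(math.ceil(write_obj_size_kb / part_size_kb))  # 52
```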
## S3 Local ## S3 Local
@ -181,50 +150,13 @@ After this, the `pregen.json` file will contain a `containers` list field the sa
3. Execute the scenario with the desired options. For example: 3. Execute the scenario with the desired options. For example:
```shell ```shell
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e CONFIG_FILE=/path/to/node/config.yml -e CONFIG_DIR=/path/to/dir/ -e PREGEN_JSON=pregen.json scenarios/s3local.js $ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e CONFIG_FILE=/path/to/node/config.yml -e PREGEN_JSON=pregen.json scenarios/s3local.js
``` ```
Note that the `s3local` scenario currently does not support deleters. Note that the `s3local` scenario currently does not support deleters.
Options (in addition to the common options): Options (in addition to the common options):
* `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation. * `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
* `MAX_TOTAL_SIZE_GB` - if specified, the maximum total payload size (in GB) for the storage engine. Once the storage engine is full, no new objects will be saved.
## Export metrics
To export metrics to Prometheus (Grafana and VictoriaMetrics also support the Prometheus format), run `k6` with the option `-o experimental-prometheus-rw` and
set the environment variable `K6_PROMETHEUS_RW_SERVER_URL` to the URL of the remote write endpoint.
To specify percentiles for trend metrics, use an environment variable `K6_PROMETHEUS_RW_TREND_STATS`.
See [k6 docs](https://k6.io/docs/results-output/real-time/prometheus-remote-write/) for a list of all possible options.
To distinguish metrics from different loaders, use the `METRIC_TAGS` option. These tags do not apply to built-in `k6` metrics.
Example:
```bash
K6_PROMETHEUS_RW_SERVER_URL=http://host:8428/api/v1/write \
K6_PROMETHEUS_RW_TREND_STATS="p(95),p(99),min,max" \
./k6 run ... -o experimental-prometheus-rw -e METRIC_TAGS="instance:server1;run:run1" scenario.js
```
## Grafana annotations
There is no built-in option to export Grafana annotations, but it can easily be done with `curl` and Grafana's annotations API.
Example:
```shell
curl --request POST \
--url https://user:password@grafana.host/api/annotations \
--header 'Content-Type: application/json' \
--data '{
"dashboardUID": "YsVWNpMIk",
"time": 1706533045014,
"timeEnd": 1706533085100,
"tags": [
"tag1",
"tag2"
],
"text": "Test annotation"
}'
```
See [Grafana docs](https://grafana.com/docs/grafana/latest/developers/http_api/annotations/) for details.
## Verify ## Verify

View file

@ -1,100 +1,74 @@
import {sleep} from 'k6'; import datagen from 'k6/x/frostfs/datagen';
import {SharedArray} from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging'; import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry'; import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3'; import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats'; import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import {generateS3Key} from './libs/keygen.js'; import { textSummary } from './libs/k6-summary-0.0.2.js';
import {parseEnv} from './libs/env-parser.js'; import { parseEnv } from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js'; import { uuidv4 } from './libs/k6-utils-1.4.0.js';
import {newGenerator} from './libs/datagen.js';
parseEnv(); parseEnv();
const obj_list = new SharedArray( const obj_list = new SharedArray('obj_list', function () {
'obj_list', return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; }); });
const bucket_list = new SharedArray( const bucket_list = new SharedArray('bucket_list', function () {
'bucket_list', return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).buckets; }); });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json'; const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl : no_verify_ssl
}
// Select random S3 endpoint for current VU // Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(','); const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint = const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)]; const s3_client = s3.connect(`http://${s3_endpoint}`);
const s3_client = s3.connect(s3_endpoint, connection_args); const log = logging.new().withField("endpoint", s3_endpoint);
const log = logging.new().withField('endpoint', s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE; const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION; const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) { const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
stats.setTags(__ENV.METRIC_TAGS) let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
} }
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10; const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE), __ENV.PAYLOAD_TYPE || "");
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : read_age,
})
}
const scenarios = {}; const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0'); const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) { if (write_vu_count > 0) {
scenarios.write = { scenarios.write = {
executor : 'constant-vus', executor: 'constant-vus',
vus : write_vu_count, vus: write_vu_count,
duration : `${duration}s`, duration: `${duration}s`,
exec : 'obj_write', exec: 'obj_write',
gracefulStop : '5s', gracefulStop: '5s',
}; };
} }
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_exit_on_null = write_vu_count == 0;
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
obj_to_delete_selector =
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : delete_age,
});
}
const read_vu_count = parseInt(__ENV.READERS || '0'); const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) { if (read_vu_count > 0) {
scenarios.read = { scenarios.read = {
executor : 'constant-vus', executor: 'constant-vus',
vus : read_vu_count, vus: read_vu_count,
duration : `${duration}s`, duration: `${duration}s`,
exec : 'obj_read', exec: 'obj_read',
gracefulStop : '5s', gracefulStop: '5s',
}; };
} }
@ -105,17 +79,17 @@ if (delete_vu_count > 0) {
} }
scenarios.delete = { scenarios.delete = {
executor : 'constant-vus', executor: 'constant-vus',
vus : delete_vu_count, vus: delete_vu_count,
duration : `${duration}s`, duration: `${duration}s`,
exec : 'obj_delete', exec: 'obj_delete',
gracefulStop : '5s', gracefulStop: '5s',
}; };
} }
export const options = { export const options = {
scenarios, scenarios,
setupTimeout : '5s', setupTimeout: '5s',
}; };
export function setup() { export function setup() {
@ -128,49 +102,38 @@ export function setup() {
console.log(`Writing VUs: ${write_vu_count}`); console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`); console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`); console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
if (delete_vu_count > 0){
obj_to_delete_selector.sync.add(delete_vu_count)
}
} }
export function teardown(data) { export function teardown(data) {
if (obj_registry) { if (obj_registry) {
obj_registry.close(); obj_registry.close();
} }
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
} }
export function handleSummary(data) { export function handleSummary(data) {
return { return {
'stdout' : textSummary(data, {indent : ' ', enableColors : false}), 'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json] : JSON.stringify(data), [summary_json]: JSON.stringify(data),
}; };
} }
export function obj_write() { export function obj_write() {
if (__ENV.SLEEP_WRITE) { if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE); sleep(__ENV.SLEEP_WRITE);
} }
const key = generateS3Key(); const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)]; const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload(); const { payload, hash } = generator.genPayload(registry_enabled);
const resp = s3_client.put(bucket, key, payload); const resp = s3_client.put(bucket, key, payload);
if (!resp.success) { if (!resp.success) {
log.withFields({bucket : bucket, key : key}).error(resp.error); log.withFields({bucket: bucket, key: key}).error(resp.error);
return; return;
} }
if (obj_registry) { if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash()); obj_registry.addObject("", "", bucket, key, hash);
} }
} }
@ -179,24 +142,11 @@ export function obj_read() {
sleep(__ENV.SLEEP_READ); sleep(__ENV.SLEEP_READ);
} }
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key})
.error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)]; const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object); const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) { if (!resp.success) {
log.withFields({bucket : obj.bucket, key : obj.object}).error(resp.error); log.withFields({bucket: obj.bucket, key: obj.object}).error(resp.error);
} }
} }
@ -207,18 +157,12 @@ export function obj_delete() {
const obj = obj_to_delete_selector.nextObject(); const obj = obj_to_delete_selector.nextObject();
if (!obj) { if (!obj) {
if (obj_to_delete_exit_on_null) {
obj_to_delete_selector.sync.done()
obj_to_delete_selector.sync.wait()
exec.test.abort("No more objects to select");
}
return; return;
} }
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key); const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) { if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, op : 'DELETE'}) log.withFields({bucket: obj.s3_bucket, key: obj.s3_key, op: "DELETE"}).error(resp.error);
.error(resp.error);
return; return;
} }

View file

@ -1,70 +1,52 @@
import {sleep} from 'k6'; import datagen from 'k6/x/frostfs/datagen';
import {SharedArray} from 'k6/data';
import logging from 'k6/x/frostfs/logging'; import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry'; import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3'; import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats'; import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import {generateS3Key} from './libs/keygen.js'; import { textSummary } from './libs/k6-summary-0.0.2.js';
import {newGenerator} from './libs/datagen.js'; import { parseEnv } from './libs/env-parser.js';
import {parseEnv} from './libs/env-parser.js'; import { uuidv4 } from './libs/k6-utils-1.4.0.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
parseEnv(); parseEnv();
const obj_list = new SharedArray('obj_list', function() { const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects; return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
}); });
const bucket_list = new SharedArray('bucket_list', function() { const bucket_list = new SharedArray('bucket_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets; return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
}); });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json'; const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random S3 endpoint for current VU // Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(','); const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint = const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)]; const s3_client = s3.connect(`http://${s3_endpoint}`);
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true'; const log = logging.new().withField("endpoint", s3_endpoint);
const connection_args = {
no_verify_ssl: no_verify_ssl
};
const s3_client = s3.connect(s3_endpoint, connection_args);
const log = logging.new().withField('endpoint', s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE; const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION; const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined; const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined; let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) { if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector( obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_delete', __ENV.REGISTRY_FILE,
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, { "obj_to_delete",
status: 'created', __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age, age: delete_age,
}); }
);
} }
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10; const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
}
const scenarios = {}; const scenarios = {};
@ -72,7 +54,6 @@ const time_unit = __ENV.TIME_UNIT || '1s';
const pre_alloc_write_vus = parseInt(__ENV.PRE_ALLOC_WRITERS || '0'); const pre_alloc_write_vus = parseInt(__ENV.PRE_ALLOC_WRITERS || '0');
const max_write_vus = parseInt(__ENV.MAX_WRITERS || pre_alloc_write_vus); const max_write_vus = parseInt(__ENV.MAX_WRITERS || pre_alloc_write_vus);
const write_rate = parseInt(__ENV.WRITE_RATE || '0'); const write_rate = parseInt(__ENV.WRITE_RATE || '0');
const generator = newGenerator(write_rate > 0);
if (write_rate > 0) { if (write_rate > 0) {
scenarios.write = { scenarios.write = {
executor: 'constant-arrival-rate', executor: 'constant-arrival-rate',
@ -109,8 +90,7 @@ const max_delete_vus = parseInt(__ENV.MAX_DELETERS || pre_alloc_write_vus);
const delete_rate = parseInt(__ENV.DELETE_RATE || '0'); const delete_rate = parseInt(__ENV.DELETE_RATE || '0');
if (delete_rate > 0) { if (delete_rate > 0) {
if (!obj_to_delete_selector) { if (!obj_to_delete_selector) {
throw new Error( throw new Error('Positive DELETE worker number without a proper object selector');
'Positive DELETE worker number without a proper object selector');
} }
scenarios.delete = { scenarios.delete = {
@ -131,8 +111,7 @@ export const options = {
}; };
export function setup() { export function setup() {
const total_pre_allocated_vu_count = const total_pre_allocated_vu_count = pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus
console.log(`Pregenerated buckets: ${bucket_list.length}`); console.log(`Pregenerated buckets: ${bucket_list.length}`);
@ -150,37 +129,30 @@ export function setup() {
console.log(`Read rate: ${read_rate}`); console.log(`Read rate: ${read_rate}`);
console.log(`Writing rate: ${write_rate}`); console.log(`Writing rate: ${write_rate}`);
console.log(`Delete rate: ${delete_rate}`); console.log(`Delete rate: ${delete_rate}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
} }
export function teardown(data) { export function teardown(data) {
if (obj_registry) { if (obj_registry) {
obj_registry.close(); obj_registry.close();
} }
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
} }
export function handleSummary(data) { export function handleSummary(data) {
return { return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}), 'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data), [summary_json]: JSON.stringify(data),
}; };
} }
export function obj_write() { export function obj_write() {
if (__ENV.SLEEP_WRITE) { if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE); sleep(__ENV.SLEEP_WRITE);
} }
const key = generateS3Key(); const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)]; const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload(); const { payload, hash } = generator.genPayload(registry_enabled);
const resp = s3_client.put(bucket, key, payload); const resp = s3_client.put(bucket, key, payload);
if (!resp.success) { if (!resp.success) {
log.withFields({bucket: bucket, key: key}).error(resp.error); log.withFields({bucket: bucket, key: key}).error(resp.error);
@ -188,7 +160,7 @@ export function obj_write() {
} }
if (obj_registry) { if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash()); obj_registry.addObject("", "", bucket, key, hash);
} }
} }
@ -197,19 +169,6 @@ export function obj_read() {
sleep(__ENV.SLEEP_READ); sleep(__ENV.SLEEP_READ);
} }
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key})
.error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)]; const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object); const resp = s3_client.get(obj.bucket, obj.object);
@ -230,8 +189,7 @@ export function obj_delete() {
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key); const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) { if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key, op: 'DELETE'}) log.withFields({bucket: obj.s3_bucket, key: obj.s3_key, op: "DELETE"}).error(resp.error);
.error(resp.error);
return; return;
} }

View file

@ -1,233 +0,0 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
parseEnv();
const obj_list = new SharedArray(
'obj_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
const bucket_list = new SharedArray(
'bucket_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).buckets; });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl : no_verify_ssl
}
// Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const s3_client = s3.connect(s3_endpoint, connection_args);
const log = logging.new().withField('endpoint', s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : read_age,
})
}
const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor : 'constant-vus',
vus : write_vu_count,
duration : `${duration}s`,
exec : 'obj_write',
gracefulStop : '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor : 'constant-vus',
vus : read_vu_count,
duration : `${duration}s`,
exec : 'obj_read',
gracefulStop : '5s',
};
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled ) {
obj_to_delete_exit_on_null = (write_vu_count == 0) && (read_vu_count == 0)
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
obj_to_delete_selector =
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'read',
age : delete_age,
});
}
const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
if (!obj_to_delete_selector) {
throw 'Positive DELETE worker number without a proper object selector';
}
scenarios.delete = {
executor : 'constant-vus',
vus : delete_vu_count,
duration : `${duration}s`,
exec : 'obj_delete',
gracefulStop : '5s',
};
}
export const options = {
scenarios,
setupTimeout : '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout' : textSummary(data, {indent : ' ', enableColors : false}),
[summary_json] : JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
log.withFields({bucket : bucket, key : key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj ) {
return;
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, status: obj.status, op: `READ`})
.error(resp.error);
} else {
obj_registry.setObjectStatus(obj.id, obj.status, 'read');
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket : obj.bucket, key : obj.object}).error(resp.error);
} else {
obj_registry.setObjectStatus(obj.id, obj.status, 'read');
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
delete_object(obj)
}
export function delete_object(obj) {
if (!obj) {
if (obj_to_delete_exit_on_null) {
exec.test.abort("No more objects to select");
}
return;
}
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, op : 'DELETE'})
.error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}

View file

@ -1,119 +0,0 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
parseEnv();
const bucket_list = new SharedArray('bucket_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
});
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
// Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl: no_verify_ssl
};
const s3_client = s3.connect(s3_endpoint, connection_args);
const log = logging.new().withField('endpoint', s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
if (write_vu_count < 1) {
throw 'number of VUs (env WRITERS) performing write operations should be greater than 0';
}
const write_multipart_vu_count = parseInt(__ENV.WRITERS_MULTIPART || '0');
if (write_multipart_vu_count < 1) {
throw 'number of parts (env WRITERS_MULTIPART) to upload in parallel should be greater than 0';
}
const generator =
newGenerator(write_vu_count > 0 || write_multipart_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write_multipart = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write_multipart',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_vu_count = write_vu_count * write_multipart_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Writing multipart VUs: ${write_multipart_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
[summary_json]: JSON.stringify(data),
};
}
const write_multipart_part_size =
1024 * parseInt(__ENV.WRITE_OBJ_PART_SIZE || '0')
if (write_multipart_part_size < 5 * 1024 * 1024) {
throw 'part size (env WRITE_OBJ_PART_SIZE * 1024) must be greater than (5 MB)';
}
export function obj_write_multipart() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
const resp = s3_client.multipart(
bucket, key, write_multipart_part_size, write_multipart_vu_count,
payload);
if (!resp.success) {
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
}
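The two checks above pin down the multipart geometry: WRITE_OBJ_PART_SIZE is KiB-denominated and must come out to at least 5 MiB, and WRITERS_MULTIPART controls how many parts are uploaded in parallel. A standalone sketch of the resulting part arithmetic (the env names mirror the scenario above; WRITE_OBJ_SIZE and the 64 MiB example are illustrative assumptions):

// Rough part-count arithmetic for one multipart upload, using the same
// KiB-denominated env vars as the scenario above.
const objSize = 1024 * parseInt(__ENV.WRITE_OBJ_SIZE || '0');       // bytes
const partSize = 1024 * parseInt(__ENV.WRITE_OBJ_PART_SIZE || '0'); // bytes

if (partSize < 5 * 1024 * 1024) {
  throw 'parts below 5 MiB are rejected by S3 (except the last one)';
}

// e.g. a 64 MiB object with 8 MiB parts -> 8 parts,
// uploaded WRITERS_MULTIPART at a time.
const partCount = Math.ceil(objSize / partSize);
console.log(`upload will consist of ${partCount} part(s)`);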

View file

@ -1,27 +1,23 @@
-import {SharedArray} from 'k6/data';
-import exec from 'k6/execution';
+import datagen from 'k6/x/frostfs/datagen';
 import logging from 'k6/x/frostfs/logging';
 import registry from 'k6/x/frostfs/registry';
 import s3local from 'k6/x/frostfs/s3local';
-import stats from 'k6/x/frostfs/stats';
-import {generateS3Key} from './libs/keygen.js';
-import {newGenerator} from './libs/datagen.js';
-import {parseEnv} from './libs/env-parser.js';
-import {textSummary} from './libs/k6-summary-0.0.2.js';
-import {uuidv4} from './libs/k6-utils-1.4.0.js';
+import { SharedArray } from 'k6/data';
+import { textSummary } from './libs/k6-summary-0.0.2.js';
+import { parseEnv } from './libs/env-parser.js';
+import { uuidv4 } from './libs/k6-utils-1.4.0.js';
 parseEnv();
-const obj_list = new SharedArray('obj_list', function() {
+const obj_list = new SharedArray('obj_list', function () {
   return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
 });
-const container_list = new SharedArray('container_list', function() {
+const container_list = new SharedArray('container_list', function () {
   return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
 });
-const bucket_list = new SharedArray('bucket_list', function() {
+const bucket_list = new SharedArray('bucket_list', function () {
   return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
 });
@ -37,43 +33,24 @@ function bucket_mapping() {
 }
 const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
-const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
+const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
 const config_file = __ENV.CONFIG_FILE;
-const config_dir = __ENV.CONFIG_DIR;
-const max_total_size_gb =
-    __ENV.MAX_TOTAL_SIZE_GB ? parseInt(__ENV.MAX_TOTAL_SIZE_GB) : 0;
-const s3_client = s3local.connect(
-    config_file, config_dir, {
-      'debug_logger': __ENV.DEBUG_LOGGER || 'false',
-    },
-    bucket_mapping(), max_total_size_gb);
-const log = logging.new().withFields(
-    {'config_file': config_file, 'config_dir': config_dir});
-if (!!__ENV.METRIC_TAGS) {
-  stats.setTags(__ENV.METRIC_TAGS)
-}
+const s3_client = s3local.connect(config_file, {
+  'debug_logger': __ENV.DEBUG_LOGGER || 'false',
+}, bucket_mapping());
+const log = logging.new().withField("config", config_file);
 const registry_enabled = !!__ENV.REGISTRY_FILE;
-const obj_registry =
-    registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
-let obj_to_read_selector = undefined;
-if (registry_enabled) {
-  obj_to_read_selector = registry.getLoopedSelector(
-      __ENV.REGISTRY_FILE, 'obj_to_read',
-      __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
-        status: 'created',
-      })
-}
+const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
 const duration = __ENV.DURATION;
+const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));
 const scenarios = {};
 const write_vu_count = parseInt(__ENV.WRITERS || '0');
-const generator = newGenerator(write_vu_count > 0);
 if (write_vu_count > 0) {
   scenarios.write = {
     executor: 'constant-vus',
@ -109,61 +86,38 @@ export function setup() {
   console.log(`Reading VUs: ${read_vu_count}`);
   console.log(`Writing VUs: ${write_vu_count}`);
   console.log(`Total VUs: ${total_vu_count}`);
-  const start_timestamp = Date.now()
-  console.log(
-      `Load started at: ${Date(start_timestamp).toString()}`)
 }
 export function teardown(data) {
   if (obj_registry) {
     obj_registry.close();
   }
-  const end_timestamp = Date.now()
-  console.log(
-      `Load finished at: ${Date(end_timestamp).toString()}`)
 }
 export function handleSummary(data) {
   return {
-    'stdout': textSummary(data, {indent: ' ', enableColors: false}),
+    'stdout': textSummary(data, { indent: ' ', enableColors: false }),
     [summary_json]: JSON.stringify(data),
   };
 }
 export function obj_write() {
-  const key = generateS3Key();
+  const key = __ENV.OBJ_NAME || uuidv4();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
-  const payload = generator.genPayload();
+  const { payload, hash } = generator.genPayload(registry_enabled);
   const resp = s3_client.put(bucket, key, payload);
   if (!resp.success) {
-    if (resp.abort) {
-      exec.test.abort(resp.error);
-    }
     log.withFields({bucket: bucket, key: key}).error(resp.error);
     return;
   }
   if (obj_registry) {
-    obj_registry.addObject('', '', bucket, key, payload.hash());
+    obj_registry.addObject("", "", bucket, key, hash);
   }
 }
 export function obj_read() {
-  if (obj_to_read_selector) {
-    const obj = obj_to_read_selector.nextObject();
-    if (!obj) {
-      return;
-    }
-    const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
-    if (!resp.success) {
-      log.withFields({bucket: obj.s3_bucket, key: obj.s3_key})
-          .error(resp.error);
-    }
-    return
-  }
   const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
   const resp = s3_client.get(obj.bucket, obj.object);

View file

@ -1,21 +1,19 @@
-import { sleep } from 'k6';
-import { Counter } from 'k6/metrics';
-import logging from 'k6/x/frostfs/logging';
 import native from 'k6/x/frostfs/native';
 import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
-import stats from 'k6/x/frostfs/stats';
-import { parseEnv } from './libs/env-parser.js';
+import logging from 'k6/x/frostfs/logging';
+import { sleep } from 'k6';
+import { Counter } from 'k6/metrics';
 import { textSummary } from './libs/k6-summary-0.0.2.js';
+import { parseEnv } from './libs/env-parser.js';
 parseEnv();
 const obj_registry = registry.open(__ENV.REGISTRY_FILE);
 // Time limit (in seconds) for the run
-const time_limit = __ENV.TIME_LIMIT || '60';
-const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
+const time_limit = __ENV.TIME_LIMIT || "60";
+const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
 // Number of objects in each status. These counters are cumulative in a
 // sense that they reflect total number of objects in the registry, not just
@ -30,50 +28,38 @@ const obj_counters = {
 let log = logging.new();
-if (!!__ENV.METRIC_TAGS) {
-  stats.setTags(__ENV.METRIC_TAGS)
-}
 // Connect to random gRPC endpoint
 let grpc_client = undefined;
 if (__ENV.GRPC_ENDPOINTS) {
   const grpcEndpoints = __ENV.GRPC_ENDPOINTS.split(',');
-  const grpcEndpoint =
-      grpcEndpoints[Math.floor(Math.random() * grpcEndpoints.length)];
-  log = log.withField('endpoint', grpcEndpoint);
-  grpc_client = native.connect(
-      grpcEndpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 0,
-      __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 0,
-      __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
-      1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
+  const grpcEndpoint = grpcEndpoints[Math.floor(Math.random() * grpcEndpoints.length)];
+  log = log.withField("endpoint", grpcEndpoint);
+  grpc_client = native.connect(grpcEndpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 0, __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 0);
 }
 // Connect to random S3 endpoint
 let s3_client = undefined;
 if (__ENV.S3_ENDPOINTS) {
-  const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
-  const connection_args = { no_verify_ssl: no_verify_ssl };
   const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
-  const s3_endpoint =
-      s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
-  log = log.withField('endpoint', s3_endpoint);
-  s3_client = s3.connect(s3_endpoint, connection_args);
+  const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
+  log = log.withField("endpoint", s3_endpoint);
+  s3_client = s3.connect(`http://${s3_endpoint}`);
 }
-// We will attempt to verify every object in "created" status. The scenario will
-// execute as many iterations as there are objects. Each object will have 3
-// retries to be verified
+// We will attempt to verify every object in "created" status. The scenario will execute
+// as many iterations as there are objects. Each object will have 3 retries to be verified
 const obj_to_verify_selector = registry.getSelector(
-    __ENV.REGISTRY_FILE, 'obj_to_verify',
-    __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
-      status: 'created',
-    });
+  __ENV.REGISTRY_FILE,
+  "obj_to_verify",
+  __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
+  {
+    status: "created",
+  }
+);
 const obj_to_verify_count = obj_to_verify_selector.count();
-// Execute at least one iteration (executor shared-iterations can't run 0
-// iterations)
+// Execute at least one iteration (executor shared-iterations can't run 0 iterations)
 const iterations = Math.max(1, obj_to_verify_count);
-// Executor shared-iterations requires number of iterations to be larger than
-// number of VUs
+// Executor shared-iterations requires number of iterations to be larger than number of VUs
 const vus = Math.min(__ENV.CLIENTS, iterations);
 const scenarios = {
@ -96,8 +82,10 @@ export function setup() {
   // Populate counters with initial values
   for (const [status, counter] of Object.entries(obj_counters)) {
     const obj_selector = registry.getSelector(
-        __ENV.REGISTRY_FILE, status,
-        __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, { status });
+      __ENV.REGISTRY_FILE,
+      status,
+      __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
+      { status });
     counter.add(obj_selector.count());
   }
 }
@ -111,7 +99,7 @@ export function handleSummary(data) {
 export function obj_verify() {
   if (obj_to_verify_count == 0) {
-    log.info('Nothing to verify');
+    log.info("Nothing to verify");
     return;
   }
@ -121,7 +109,7 @@ export function obj_verify() {
   const obj = obj_to_verify_selector.nextObject();
   if (!obj) {
-    log.info('All objects have been verified');
+    log.info("All objects have been verified");
     return;
   }
@ -137,12 +125,11 @@ function verify_object_with_retries(obj, attempts) {
     // ReferenceError: Cannot access a variable before initialization.
     let lg = log;
     if (obj.c_id && obj.o_id) {
-      lg = lg.withFields({ cid: obj.c_id, oid: obj.o_id });
+      lg = lg.withFields({cid: obj.c_id, oid: obj.o_id});
       result = grpc_client.verifyHash(obj.c_id, obj.o_id, obj.payload_hash);
     } else if (obj.s3_bucket && obj.s3_key) {
-      lg = lg.withFields({ bucket: obj.s3_bucket, key: obj.s3_key });
-      result =
-          s3_client.verifyHash(obj.s3_bucket, obj.s3_key, obj.payload_hash);
+      lg = lg.withFields({bucket: obj.s3_bucket, key: obj.s3_key});
+      result = s3_client.verifyHash(obj.s3_bucket, obj.s3_key, obj.payload_hash);
     } else {
       lg.withFields({
         cid: obj.c_id,
@ -150,20 +137,19 @@ function verify_object_with_retries(obj, attempts) {
         bucket: obj.s3_bucket,
         key: obj.s3_key
       }).warn(`Object cannot be verified with supported protocols`);
-      return 'skipped';
+      return "skipped";
     }
     if (result.success) {
-      return 'verified';
-    } else if (result.error == 'hash mismatch') {
-      return 'invalid';
+      return "verified";
+    } else if (result.error == "hash mismatch") {
+      return "invalid";
     }
-    // Unless we explicitly saw that there was a hash mismatch, then we will
-    // retry after a delay
+    // Unless we explicitly saw that there was a hash mismatch, then we will retry after a delay
     lg.error(`Verify error: ${result.error}. Object will be re-tried`);
     sleep(__ENV.SLEEP);
   }
-  return 'invalid';
+  return "invalid";
 }
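The Math.max/Math.min clamping earlier in this file exists to satisfy k6's shared-iterations executor, which cannot run zero iterations and cannot have more VUs than iterations. A minimal sketch of the scenario block those values feed into; only the clamped counts and TIME_LIMIT come from the code above, the remaining field choices are illustrative assumptions:

// Assuming iterations, vus and time_limit as computed above:
// shared-iterations distributes `iterations` across `vus` workers until done.
const scenarios = {
  verify: {
    executor: 'shared-iterations',
    vus: vus,                      // never more VUs than iterations
    iterations: iterations,        // at least 1, even for an empty registry
    exec: 'obj_verify',
    maxDuration: `${time_limit}s`, // hard stop from TIME_LIMIT
  },
};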