Compare commits

..

1 commit

Author SHA1 Message Date
0addc11a78 [#13] Allow to use english text in the payload
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-03-10 10:35:28 +03:00
82 changed files with 1595 additions and 6016 deletions

@@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: community, triage
assignees: ''
---
## Is your feature request related to a problem? Please describe.
<!--- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
## Describe the solution you'd like
<!--- A clear and concise description of what you want to happen. -->
## Describe alternatives you've considered
<!--- A clear and concise description of any alternative solutions or features you've considered. -->
## Additional context
<!--- Add any other context or screenshots about the feature request here. -->

@@ -1,21 +0,0 @@
name: DCO action
on: [pull_request]
jobs:
dco:
name: DCO
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v2
with:
from: 'origin/${{ github.event.pull_request.base.ref }}'

@@ -1,56 +0,0 @@
name: Tests and linters
on: [pull_request]
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
cache: true
- name: Install linters
run: make lint-install
- name: Run linters
run: make lint
tests:
name: Tests
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '${{ matrix.go_versions }}'
cache: true
- name: Run tests
run: make test
tests-race:
name: Tests with -race
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
cache: true
- name: Run tests
run: go test ./... -count=1 -race

@@ -2,7 +2,7 @@
name: Bug report name: Bug report
about: Create a report to help us improve about: Create a report to help us improve
title: '' title: ''
labels: community, triage, bug labels: community, triage
assignees: '' assignees: ''
--- ---
@@ -18,11 +8,8 @@ assignees: ''
If suggesting a change/improvement, explain the difference from current behavior --> If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution ## Possible Solution
<!-- Not obligatory <!-- Not obligatory, but suggest a fix/reason for the bug,
If no reason/fix/additions for the bug can be suggested, or ideas how to implement the addition or change -->
uncomment the following phrase:
No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->
## Steps to Reproduce (for bugs) ## Steps to Reproduce (for bugs)
<!-- Provide a link to a live example, or an unambiguous set of steps <!-- Provide a link to a live example, or an unambiguous set of steps

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: community, triage
assignees: ''
---
## Is your feature request related to a problem? Please describe.
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when ... -->
## Describe the solution you'd like
<!-- A clear and concise description of what you want to happen. -->
## Describe alternatives you've considered
<!-- A clear and concise description of any alternative solutions or features you've considered. -->
## Additional context
<!-- Add any other context or screenshots about the feature request here. -->

Image file changed (before: 5.5 KiB, after: 5.5 KiB)

.github/workflows/dco.yml vendored Normal file

@@ -0,0 +1,21 @@
name: DCO check
on:
pull_request:
branches:
- master
jobs:
commits_check_job:
runs-on: ubuntu-latest
name: Commits Check
steps:
- name: Get PR Commits
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: DCO Check
uses: tim-actions/dco@master
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

.github/workflows/go.yml vendored Normal file

@@ -0,0 +1,34 @@
name: Tests
on:
pull_request:
branches:
- master
types: [opened, synchronize]
paths-ignore:
- '**/*.md'
workflow_dispatch:
jobs:
lint:
name: Lint
runs-on: ubuntu-20.04
steps:
- name: Check out code
uses: actions/checkout@v2
- name: golangci-lint
uses: golangci/golangci-lint-action@v2
with:
version: latest
args: --timeout=2m
tests:
name: Tests
runs-on: ubuntu-20.04
strategy:
matrix:
go_versions: [ '1.17', '1.18', '1.19' ]
fail-fast: false
steps:
- uses: actions/checkout@v3

.gitignore vendored

@@ -1,6 +1,3 @@
k6 k6
*.bolt *.bolt
presets presets
bin
# Preset script artifacts.
__pycache__

@@ -3,8 +3,8 @@
First, thank you for contributing! We love and encourage pull requests from First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines: everyone. Please follow the guidelines:
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/issues) and - Check the open [issues](https://github.com/TrueCloudLab/xk6-frostfs/issues) and
[pull requests](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/pulls) for existing [pull requests](https://github.com/TrueCloudLab/xk6-frostfs/pulls) for existing
discussions. discussions.
- Open an issue first, to discuss a new feature or enhancement. - Open an issue first, to discuss a new feature or enhancement.
@@ -27,20 +27,19 @@ Start by forking the `xk6-frostfs` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details: are the steps in details:
### Set up your repository ### Set up your GitHub Repository
Fork [xk6-frostfs upstream](https://github.com/TrueCloudLab/xk6-frostfs/fork) source
Fork [xk6-frostfs upstream](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/fork) source
repository to your own personal repository. Copy the URL of your fork (you will repository to your own personal repository. Copy the URL of your fork (you will
need it for the `git clone` command below). need it for the `git clone` command below).
```sh ```sh
$ git clone https://git.frostfs.info/TrueCloudLab/xk6-frostfs $ git clone https://github.com/TrueCloudLab/xk6-frostfs
``` ```
### Set up git remote as ``upstream`` ### Set up git remote as ``upstream``
```sh ```sh
$ cd xk6-frostfs $ cd xk6-frostfs
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/xk6-frostfs $ git remote add upstream https://github.com/TrueCloudLab/xk6-frostfs
$ git fetch upstream $ git fetch upstream
$ git merge upstream/master $ git merge upstream/master
... ...
@@ -90,7 +89,7 @@ $ git push origin feature/123-something_awesome
``` ```
### Create a Pull Request ### Create a Pull Request
Pull requests can be created via git.frostfs.info. Refer to [this Pull requests can be created via GitHub. Refer to [this
document](https://help.github.com/articles/creating-a-pull-request/) for document](https://help.github.com/articles/creating-a-pull-request/) for
detailed steps on how to create a pull request. After a Pull Request gets peer detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged. reviewed and approved, it will be merged.

Makefile

@@ -1,114 +0,0 @@
#!/usr/bin/make -f
# Common variables
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
BINDIR = bin
OUTPUT_LINT_DIR ?= $(abspath $(BINDIR))/linters
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache
# Binaries to build
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
BINS = $(addprefix $(BINDIR)/, $(CMDS))
.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format lint docker/lint pre-commit unpre-commit version clean
# Make all binaries
all: $(BINS)
$(BINS): $(BINDIR) dep
@echo "⇒ Build $@"
CGO_ENABLED=0 \
go build -v -trimpath \
-ldflags "-X $(REPO)/internal/version.Version=$(VERSION)" \
-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
$(BINDIR):
@echo "⇒ Ensure dir: $@"
@mkdir -p $@
# Pull go dependencies
dep:
@printf "⇒ Download requirements: "
@CGO_ENABLED=0 \
go mod download && echo OK
@printf "⇒ Tidy requirements: "
@CGO_ENABLED=0 \
go mod tidy -v && echo OK
# Run `make %` in Golang container, for more information run `make help.docker/%`
docker/%:
$(if $(filter $*,all $(BINS)), \
@echo "=> Running 'make $*' in clean Docker environment" && \
docker run --rm -t \
-v `pwd`:/src \
-w /src \
-u `stat -c "%u:%g" .` \
--env HOME=/src \
golang:$(GO_VERSION) make $*,\
@echo "supported docker targets: all $(BINS) lint")
# Run tests
test:
@go test ./... -cover
# Run tests with race detection and produce coverage output
cover:
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
@go tool cover -html=coverage.txt -o coverage.html
# Reformat code
format:
@echo "⇒ Processing gofmt check"
@gofmt -s -w ./
# Run linters
lint:
@if [ ! -d "$(LINT_DIR)" ]; then \
make lint-install; \
fi
$(LINT_DIR)/golangci-lint run --timeout=5m
# Install linters
lint-install:
@rm -rf $(OUTPUT_LINT_DIR)
@mkdir -p $(OUTPUT_LINT_DIR)
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters in Docker
docker/lint:
docker run --rm -it \
-v `pwd`:/src \
-u `stat -c "%u:%g" .` \
--env HOME=/src \
golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'
# Activate pre-commit hooks
pre-commit:
pre-commit install -t pre-commit -t commit-msg
# Deactivate pre-commit hooks
unpre-commit:
pre-commit uninstall -t pre-commit -t commit-msg
# Show current version
version:
@echo $(VERSION)
# Clean up files
clean:
rm -rf .cache
rm -rf $(BINDIR)
include help.mk

README.md

@@ -1,5 +1,5 @@
<p align="center"> <p align="center">
<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo"> <img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
</p> </p>
<p align="center"> <p align="center">
<a href="https://go.k6.io/k6">k6</a> extension to test and benchmark FrostFS related protocols. <a href="https://go.k6.io/k6">k6</a> extension to test and benchmark FrostFS related protocols.
@@ -47,12 +47,10 @@ Create native client with `connect` method. Arguments:
- hex encoded private key (empty value produces random key) - hex encoded private key (empty value produces random key)
- dial timeout in seconds (0 for the default value) - dial timeout in seconds (0 for the default value)
- stream timeout in seconds (0 for the default value) - stream timeout in seconds (0 for the default value)
- generate object header on the client side (for big object - split locally too)
- max size for generated object header on the client side (for big object - the size that the object is splitted into)
```js ```js
import native from 'k6/x/frostfs/native'; import native from 'k6/x/frostfs/native';
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0) const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0)
``` ```
### Methods ### Methods
@@ -71,27 +69,6 @@ const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0
It returns dictionary with `success` boolean flag, `object_id` string and It returns dictionary with `success` boolean flag, `object_id` string and
`error` string. `error` string.
## Local
Create a local client with `connect` method. Arguments:
- local path to frostfs storage node configuration file
- local path to frostfs storage node configuration directory
- hex encoded private key (empty value produces random key)
- whether to use the debug logger (warning: very verbose)
```js
import local from 'k6/x/frostfs/local';
const local_client = local.connect("/path/to/config.yaml", "/path/to/config/dir", "", false)
```
### Methods
- `put(container_id, headers, payload)`. Returns dictionary with `success`
boolean flag, `object_id` string, and `error` string.
- `get(container_id, object_id)`. Returns dictionary with `success` boolean
flag, and `error` string.
- `delete(container_id, object_id)`. Returns dictionary with `success` boolean
flag, and `error` string.
## S3 ## S3
Create s3 client with `connect` method. Arguments: Create s3 client with `connect` method. Arguments:
@@ -101,13 +78,13 @@ Credentials are taken from default AWS configuration files and ENVs.
```js ```js
import s3 from 'k6/x/frostfs/s3'; import s3 from 'k6/x/frostfs/s3';
const s3_cli = s3.connect("https://s3.frostfs.devenv:8080") const s3_cli = s3.connect("http://s3.frostfs.devenv:8080")
``` ```
You can also provide additional options: You can also provide additional options:
```js ```js
import s3 from 'k6/x/frostfs/s3'; import s3 from 'k6/x/frostfs/s3';
const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s'}) const s3_cli = s3.connect("http://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s'})
``` ```
* `no_verify_ss` - Bool. If `true` - skip verifying the s3 certificate chain and host name (useful if s3 uses self-signed certificates) * `no_verify_ss` - Bool. If `true` - skip verifying the s3 certificate chain and host name (useful if s3 uses self-signed certificates)
@@ -121,89 +98,9 @@ const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 't
- `get(bucket, key)`. Returns dictionary with `success` boolean flag and `error` - `get(bucket, key)`. Returns dictionary with `success` boolean flag and `error`
string. string.
## S3 Local
Create local s3 client with `connect` method. Arguments:
- local path to frostfs storage node configuration file
- local path to frostfs storage node configuration directory
- parameter map with the following options:
* `hex_key`: private key to use as a hexadecimal string. A random one is created if none is provided.
* `node_position`: position of this node in the node array if loading multiple nodes independently (default: 0).
* `node_count`: number of nodes in the node array if loading multiple nodes independently (default: 1).
* `debug_logger`: whether to use the development logger instead of the default. Helpful for debugging (default: false).
- bucket-container mapping, which is needed to resolve the container id for a given bucket name. Any bucket
used by the client must have an entry here.
```js
import local from 'k6/x/frostfs/local';
const params = {'node_position': 1, 'node_count': 3}
const bucketMapping = {'mytestbucket': 'GBQDDUM1hdodXmiRHV57EUkFWJzuntsG8BG15wFSwam6'}
const local_client = local.connect("/path/to/config.yaml", "/path/to/config/dir", params, bucketMapping)
```
### Methods
- `put(bucket, key, payload)`. Returns dictionary with `success` boolean flag
and `error` string.
- `get(bucket, key)`. Returns dictionary with `success` boolean flag and `error`
string.
# Examples # Examples
See native protocol and s3 test suite examples in [examples](./examples) dir. See native protocol and s3 test suit examples in [examples](./examples) dir.
# Command line utils
To build all command line utils just run:
```shell
$ make
```
All binaries will be in `bin` directory.
## Export registry db
You can export registry bolt db to json file, that can be used as pregen for scenarios (see [docs](./scenarios/run_scenarios.md)).
To do this use `frostfs-xk6-registry-exporter`, available flags can be seen in help:
```shell
$ ./bin/frostfs-xk6-registry-exporter -h
Registry exporter for xk6
Usage:
registry-exporter [flags]
Examples:
registry-exporter registry.bolt
registry-exporter --status created --out out.json registry.bolt
Flags:
--age int Object age
--format string Output format (default "json")
-h, --help help for registry-exporter
--out string Path to output file (default "dumped-registry.json")
--status string Object status (default "created")
-v, --version version for registry-exporter
```
## Import pregen into registry db
You can import pregenerated json files into registry bolt db. Use `frostfs-xk6-registry import`. Usage examples are in help:
```shell
$ ./bin/frostfs-xk6-registry import -h
Import objects into registry from pregenerated files
Usage:
xk6-registry import [flags]
Examples:
xk6-registry import registry.bolt preset.json
xk6-registry import registry.bolt preset.json another_preset.json
Flags:
-h, --help help for import
```
# License # License

@@ -1,18 +0,0 @@
package main
import (
"context"
"os"
"os/signal"
"syscall"
)
func main() {
ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
if cmd, err := rootCmd.ExecuteContextC(ctx); err != nil {
cmd.PrintErrln("Error:", err.Error())
cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
os.Exit(1)
}
}

@@ -1,89 +0,0 @@
package main
import (
"fmt"
"runtime"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/version"
"github.com/spf13/cobra"
)
var rootCmd = &cobra.Command{
Use: "registry-exporter",
Version: version.Version,
Short: "Registry exporter",
Long: "Registry exporter for xk6",
Example: `registry-exporter registry.bolt
registry-exporter --status created --out out.json registry.bolt`,
SilenceErrors: true,
SilenceUsage: true,
RunE: rootCmdRun,
}
const (
outFlag = "out"
formatFlag = "format"
statusFlag = "status"
ageFlag = "age"
)
const (
defaultOutPath = "dumped-registry.json"
jsonFormat = "json"
createdStatus = "created"
)
func init() {
rootCmd.Flags().String(outFlag, defaultOutPath, "Path to output file")
rootCmd.Flags().String(formatFlag, jsonFormat, "Output format")
rootCmd.Flags().String(statusFlag, createdStatus, "Object status")
rootCmd.Flags().Int(ageFlag, 0, "Object age")
cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
rootCmd.SetVersionTemplate(`FrostFS xk6 Registry Exporter
{{printf "Version: %s" .Version }}
GoVersion: {{ runtimeVersion }}
`)
}
func rootCmdRun(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return fmt.Errorf("expected exacly one non-flag argumet: path to the registry, got: %s", args)
}
format, err := cmd.Flags().GetString(formatFlag)
if err != nil {
return fmt.Errorf("get '%s' flag: %w", formatFlag, err)
}
if format != jsonFormat {
return fmt.Errorf("unknown format '%s', only '%s' is supported", format, jsonFormat)
}
out, err := cmd.Flags().GetString(outFlag)
if err != nil {
return fmt.Errorf("get '%s' flag: %w", outFlag, err)
}
status, err := cmd.Flags().GetString(statusFlag)
if err != nil {
return fmt.Errorf("get '%s' flag: %w", statusFlag, err)
}
age, err := cmd.Flags().GetInt(ageFlag)
if err != nil {
return fmt.Errorf("get '%s' flag: %w", ageFlag, err)
}
objRegistry := registry.NewObjRegistry(cmd.Context(), args[0])
objSelector := registry.NewObjSelector(objRegistry, 0, registry.SelectorAwaiting, &registry.ObjFilter{
Status: status,
Age: age,
})
objExporter := registry.NewObjExporter(objSelector)
cmd.Println("Writing result file:", out)
return objExporter.ExportJSONPreGen(out)
}

@@ -1,55 +0,0 @@
package importer
import (
"encoding/json"
"os"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
)
type PreGenObj struct {
Bucket string `json:"bucket"`
Object string `json:"object"`
Container string `json:"container"`
}
type PreGenerateInfo struct {
Buckets []string `json:"buckets"`
Containers []string `json:"containers"`
Objects []PreGenObj `json:"objects"`
ObjSize string `json:"obj_size"`
}
// ImportJSONPreGen writes objects from pregenerated JSON file
// to the registry.
// Note that ImportJSONPreGen does not check if object already
// exists in the registry so in case of re-entry the registry
// will have two entities representing the same object.
func ImportJSONPreGen(o *registry.ObjRegistry, filename string) error {
f, err := os.ReadFile(filename)
if err != nil {
return err
}
var pregenInfo PreGenerateInfo
err = json.Unmarshal(f, &pregenInfo)
if err != nil {
return err
}
// AddObject uses DB.Batch to combine concurrent Batch calls
// into a single Bolt transaction. DB.Batch is limited by
// DB.MaxBatchDelay which may affect perfomance.
for _, obj := range pregenInfo.Objects {
if obj.Bucket != "" {
err = o.AddObject("", "", obj.Bucket, obj.Object, "")
} else {
err = o.AddObject(obj.Container, obj.Object, "", "", "")
}
if err != nil {
return err
}
}
return nil
}
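For context, a minimal sketch of how the importer above could be driven programmatically, assuming the constructors shown in the removed sources: `registry.NewObjRegistry(ctx, path)` and `ImportJSONPreGen(registry, file)`. The registry and preset file names are hypothetical.

```go
package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/xk6-frostfs/cmd/xk6-registry/importer"
	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
)

func main() {
	// Open (or create) the bolt-backed object registry; "registry.bolt" is a hypothetical path.
	objRegistry := registry.NewObjRegistry(context.Background(), "registry.bolt")

	// Import every object described in the pregenerated preset file.
	// Re-running this adds duplicate entries, as the doc comment above notes.
	if err := importer.ImportJSONPreGen(objRegistry, "preset.json"); err != nil {
		panic(err)
	}
}
```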

@@ -1,27 +0,0 @@
package importer
import (
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
"github.com/spf13/cobra"
)
// Cmd represents the import command.
var Cmd = &cobra.Command{
Use: "import",
Short: "Import objects into registry",
Long: "Import objects into registry from pregenerated files",
Example: `xk6-registry import registry.bolt preset.json
xk6-registry import registry.bolt preset.json another_preset.json`,
RunE: runCmd,
Args: cobra.MinimumNArgs(2),
}
func runCmd(cmd *cobra.Command, args []string) error {
objRegistry := registry.NewObjRegistry(cmd.Context(), args[0])
for i := 1; i < len(args); i++ {
if err := ImportJSONPreGen(objRegistry, args[i]); err != nil {
return err
}
}
return nil
}

@@ -1,18 +0,0 @@
package main
import (
"context"
"os"
"os/signal"
"syscall"
)
func main() {
ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
if cmd, err := rootCmd.ExecuteContextC(ctx); err != nil {
cmd.PrintErrln("Error:", err.Error())
cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
os.Exit(1)
}
}

@@ -1,33 +0,0 @@
package main
import (
"runtime"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/cmd/xk6-registry/importer"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/version"
"github.com/spf13/cobra"
)
var rootCmd = &cobra.Command{
Use: "xk6-registry",
Version: version.Version,
Short: "Command Line Tool to work with Registry",
Long: `Registry provides tools to work with object registry for xk6.
It contains command for importing objects in registry from preset`,
SilenceErrors: true,
SilenceUsage: true,
Run: rootCmdRun,
}
func init() {
cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
rootCmd.SetVersionTemplate(`FrostFS xk6-registry
{{printf "Version: %s" .Version }}
GoVersion: {{ runtimeVersion }}
`)
rootCmd.AddCommand(importer.Cmd)
}
func rootCmdRun(cmd *cobra.Command, _ []string) {
_ = cmd.Usage()
}

@@ -1,24 +0,0 @@
import local from 'k6/x/frostfs/local';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b');
const local_cli = local.connect("/path/to/config.yaml", "/path/to/config/dir", "", false)
export const options = {
stages: [
{duration: '30s', target: 10},
],
};
export default function () {
let headers = {
'unique_header': uuidv4()
}
const container_id = '6BVPPXQewRJ6J5EYmAPLczXxNocS7ikyF7amS2esWQnb';
let resp = local_cli.put(container_id, headers, payload)
if (resp.success) {
local_cli.get(container_id, resp.object_id)
} else {
console.log(resp.error)
}
}

@@ -1,13 +1,13 @@
import {uuidv4} from 'https://jslib.k6.io/k6-utils/1.2.0/index.js';
import {fail} from "k6";
import native from 'k6/x/frostfs/native'; import native from 'k6/x/frostfs/native';
import { fail } from "k6";
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b'); const payload = open('../go.sum', 'b');
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false, 0) const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0)
export const options = { export const options = {
stages: [ stages: [
{ duration: '30s', target: 10 }, {duration: '30s', target: 10},
], ],
}; };
@@ -24,7 +24,7 @@ export function setup() {
fail(res.error) fail(res.error)
} }
console.info("created container", res.container_id) console.info("created container", res.container_id)
return { container_id: res.container_id } return {container_id: res.container_id}
} }
export default function (data) { export default function (data) {

@@ -1,9 +1,9 @@
import { uuidv4 } from 'https://jslib.k6.io/k6-utils/1.2.0/index.js';
import native from 'k6/x/frostfs/native'; import native from 'k6/x/frostfs/native';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b'); const payload = open('../go.sum', 'b');
const container = "AjSxSNNXbJUDPqqKYm1VbFVDGCakbpUNH8aGjPmGAH3B" const container = "AjSxSNNXbJUDPqqKYm1VbFVDGCakbpUNH8aGjPmGAH3B"
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0) const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0)
const frostfs_obj = frostfs_cli.onsite(container, payload) const frostfs_obj = frostfs_cli.onsite(container, payload)
export const options = { export const options = {
@@ -14,11 +14,11 @@ export const options = {
export default function () { export default function () {
let headers = { let headers = {
'unique_header': uuidv4() 'unique_header': uuidv4()
} }
let resp = frostfs_obj.put(headers) let resp = frostfs_obj.put(headers)
if (resp.success) { if (resp.success) {
frostfs_cli.get(container, resp.object_id) frostfs_cli.get(container, resp.object_id)
} else { } else {
console.log(resp.error) console.log(resp.error)
} }

@@ -1,6 +1,6 @@
import {uuidv4} from 'https://jslib.k6.io/k6-utils/1.2.0/index.js';
import {fail} from 'k6'
import s3 from 'k6/x/frostfs/s3'; import s3 from 'k6/x/frostfs/s3';
import { fail } from 'k6'
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b'); const payload = open('../go.sum', 'b');
const bucket = "cats" const bucket = "cats"

@@ -1,15 +0,0 @@
import s3local from 'k6/x/frostfs/s3local';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const bucket = "testbucket"
const payload = open('../go.sum', 'b');
const s3local_cli = s3local.connect("path/to/storage/config.yml", "path/to/storage/config/dir", {}, {
'testbucket': 'GBQDDUM1hdodXmiRHV57EUkFWJzuntsG8BG15wFSwam6',
});
export default function () {
const key = uuidv4();
if (s3local_cli.put(bucket, key, payload).success) {
s3local_cli.get(bucket, key)
}
}

@@ -2,14 +2,10 @@ package xk6_frostfs
import ( import (
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen" _ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/env"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/logging" _ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/logging"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/native" _ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/native"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/profile"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry" _ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/s3" _ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/s3"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/s3local"
"go.k6.io/k6/js/modules" "go.k6.io/k6/js/modules"
) )

go.mod

@@ -1,133 +1,71 @@
module git.frostfs.info/TrueCloudLab/xk6-frostfs module git.frostfs.info/TrueCloudLab/xk6-frostfs
go 1.22 go 1.17
require ( require (
git.frostfs.info/TrueCloudLab/frostfs-node v0.38.3-0.20240502170333-ec2873caa7c6 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230307124721-94476f905599
git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.29.0-rc.1.0.20240422122918-034396d554ec
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240502080121-12ddefe07877
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 git.frostfs.info/TrueCloudLab/tzhash v1.8.0
github.com/aws/aws-sdk-go-v2 v1.19.0 github.com/aws/aws-sdk-go-v2 v1.16.3
github.com/aws/aws-sdk-go-v2/config v1.18.28 github.com/aws/aws-sdk-go-v2/config v1.15.5
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.72 github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9
github.com/aws/aws-sdk-go-v2/service/s3 v1.37.0 github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf
github.com/dop251/goja v0.0.0-20230626124041-ba8a63e79201
github.com/go-loremipsum/loremipsum v1.1.3 github.com/go-loremipsum/loremipsum v1.1.3
github.com/google/uuid v1.6.0 github.com/google/uuid v1.3.0
github.com/joho/godotenv v1.5.1 github.com/nspcc-dev/neo-go v0.100.1
github.com/nspcc-dev/neo-go v0.105.1 github.com/sirupsen/logrus v1.8.1
github.com/panjf2000/ants/v2 v2.9.0 github.com/stretchr/testify v1.8.1
github.com/sirupsen/logrus v1.9.3 go.etcd.io/bbolt v1.3.6
github.com/spf13/cobra v1.8.0 go.k6.io/k6 v0.38.2
github.com/stretchr/testify v1.8.4
go.etcd.io/bbolt v1.3.8
go.k6.io/k6 v0.45.1
go.uber.org/zap v1.26.0
golang.org/x/sys v0.18.0
) )
require ( require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240427200446-67c6f305b21f // indirect git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230307104236-f69d2ad83c51 // indirect
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240409115729-6eb492025bdd // indirect
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 // indirect git.frostfs.info/TrueCloudLab/hrw v1.2.0 // indirect
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240416071728-04a79f57ef1f // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 // indirect
github.com/aws/aws-sdk-go v1.44.296 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.27 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.36 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.27 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.30 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.29 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.4 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.13 // indirect github.com/aws/smithy-go v1.11.2 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.19.3 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/aws/smithy-go v1.13.5 // indirect github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/fatih/color v1.13.0 // indirect
github.com/bluele/gcache v0.0.2 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/dlclark/regexp2 v1.10.0 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-chi/chi/v5 v5.0.8 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-pkgz/expirable-cache/v3 v3.0.0 // indirect
github.com/go-sourcemap/sourcemap v2.1.4-0.20211119122758-180fcef48034+incompatible // indirect github.com/go-sourcemap/sourcemap v2.1.4-0.20211119122758-180fcef48034+incompatible // indirect
github.com/golang/snappy v0.0.4 // indirect github.com/golang/protobuf v1.5.2 // indirect
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/kr/pretty v0.3.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-isatty v0.0.14 // indirect
github.com/minio/sio v0.3.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect
github.com/mstoykov/atlas v0.0.0-20220811071828-388f114305dd // indirect
github.com/nats-io/nats.go v1.32.0 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20240112074137-296698a162ae // indirect
github.com/nspcc-dev/rfc6979 v0.2.0 // indirect github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
github.com/onsi/gomega v1.20.2 // indirect github.com/nxadm/tail v1.4.8 // indirect
github.com/pelletier/go-toml/v2 v2.1.1 // indirect github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.18.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.46.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e // indirect github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e // indirect
github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.11.0 // indirect github.com/spf13/afero v1.1.2 // indirect
github.com/spf13/cast v1.6.0 // indirect golang.org/x/crypto v0.4.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect golang.org/x/exp v0.0.0-20221227203929-1b447090c38c // indirect
github.com/spf13/viper v1.18.2 // indirect golang.org/x/net v0.3.0 // indirect
github.com/ssgreg/journald v1.0.0 // indirect golang.org/x/sys v0.3.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect golang.org/x/text v0.5.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
github.com/twmb/murmur3 v1.1.8 // indirect google.golang.org/genproto v0.0.0-20200903010400-9bfcb5116336 // indirect
go.opentelemetry.io/otel v1.22.0 // indirect google.golang.org/grpc v1.48.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect google.golang.org/protobuf v1.28.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect gopkg.in/guregu/null.v3 v3.3.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 // indirect
go.opentelemetry.io/otel/metric v1.22.0 // indirect
go.opentelemetry.io/otel/sdk v1.22.0 // indirect
go.opentelemetry.io/otel/trace v1.22.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/grpc v1.63.2 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/guregu/null.v3 v3.5.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
) )

go.sum

File diff suppressed because it is too large

help.mk

@@ -1,22 +0,0 @@
.PHONY: help
# Show this help prompt
help:
@echo ' Usage:'
@echo ''
@echo ' make <target>'
@echo ''
@echo ' Targets:'
@echo ''
@awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9.%_/-]+ ?:/{ print " ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq
# Show help for docker/% IGNORE
help.docker/%:
$(eval TARGETS:=$(notdir all lint) ${BINS})
@echo ' Usage:'
@echo ''
@echo ' make docker/% -- Run `make %` in Golang container'
@echo ''
@echo ' Supported docker targets:'
@echo ''
@$(foreach bin, $(TARGETS), echo ' ' $(bin);)

@@ -38,7 +38,7 @@ func (d *Datagen) Exports() modules.Exports {
return modules.Exports{Default: d} return modules.Exports{Default: d}
} }
func (d *Datagen) Generator(size int, typ string, streaming bool) *Generator { func (d *Datagen) Generator(size int, typ string) *Generator {
g := NewGenerator(d.vu, size, strings.ToLower(typ), streaming) g := NewGenerator(d.vu, size, strings.ToLower(typ))
return &g return &g
} }

@@ -2,10 +2,12 @@ package datagen
import ( import (
"bytes" "bytes"
"crypto/sha256"
"encoding/hex"
"math/rand" "math/rand"
"sync/atomic"
"time" "time"
"github.com/dop251/goja"
"github.com/go-loremipsum/loremipsum" "github.com/go-loremipsum/loremipsum"
"go.k6.io/k6/js/modules" "go.k6.io/k6/js/modules"
) )
@@ -26,9 +28,11 @@
buf []byte buf []byte
typ string typ string
offset int offset int
}
streaming bool GenPayloadResponse struct {
seed *atomic.Int64 Payload goja.ArrayBuffer
Hash string
} }
) )
@@ -41,66 +45,57 @@ var payloadTypes = []string{
"", "",
} }
func NewGenerator(vu modules.VU, size int, typ string, streaming bool) Generator { func NewGenerator(vu modules.VU, size int, typ string) Generator {
if size <= 0 { if size <= 0 {
panic("size should be positive") panic("size should be positive")
} }
var found bool var found bool
for i := range payloadTypes { for i := range payloadTypes {
if payloadTypes[i] == typ { if payloadTypes[i] == typ {
found = true found = true
break
} }
} }
if !found { if !found {
vu.InitEnv().Logger.Info("Unknown payload type '%s', random will be used.", typ) vu.InitEnv().Logger.Info("Unknown payload type '%s', random will be used.", typ)
} }
g := Generator{ return Generator{
vu: vu, vu: vu,
size: size, size: size,
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
buf: make([]byte, size+TailSize),
typ: typ, typ: typ,
} }
return Generator{vu: vu, size: size, buf: nil, typ: typ, offset: 0}
if streaming {
g.streaming = true
g.seed = new(atomic.Int64)
} else {
g.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
g.buf = make([]byte, size+TailSize)
g.fillBuffer()
}
return g
} }
func (g *Generator) fillBuffer() { func (g *Generator) GenPayload(calcHash bool) GenPayloadResponse {
switch g.typ {
case "text":
li := loremipsum.New()
b := bytes.NewBuffer(g.buf[:0])
for b.Len() < g.size+TailSize {
b.WriteString(li.Paragraph())
b.WriteRune('\n')
}
g.buf = b.Bytes()
default:
g.rand.Read(g.buf) // Per docs, err is always nil here
}
}
func (g *Generator) GenPayload() Payload {
if g.streaming {
return NewStreamPayload(g.size, g.seed.Add(1), g.typ)
}
data := g.nextSlice() data := g.nextSlice()
return NewFixedPayload(data)
dataHash := ""
if calcHash {
hashBytes := sha256.Sum256(data)
dataHash = hex.EncodeToString(hashBytes[:])
}
payload := g.vu.Runtime().NewArrayBuffer(data)
return GenPayloadResponse{Payload: payload, Hash: dataHash}
} }
func (g *Generator) nextSlice() []byte { func (g *Generator) nextSlice() []byte {
if g.offset+g.size >= len(g.buf) { if g.offset+g.size >= len(g.buf) {
g.offset = 0 switch g.typ {
g.fillBuffer() case "text":
li := loremipsum.New()
b := bytes.NewBuffer(g.buf[:0])
for b.Len() < g.size+TailSize {
b.WriteString(li.Paragraph())
b.WriteRune('\n')
}
g.buf = b.Bytes()
default:
g.offset = 0
g.rand.Read(g.buf) // Per docs, err is always nil here
}
} }
result := g.buf[g.offset : g.offset+g.size] result := g.buf[g.offset : g.offset+g.size]

@@ -16,25 +16,25 @@ func TestGenerator(t *testing.T) {
t.Run("fails on negative size", func(t *testing.T) { t.Run("fails on negative size", func(t *testing.T) {
require.Panics(t, func() { require.Panics(t, func() {
_ = NewGenerator(vu, -1, "", false) _ = NewGenerator(vu, -1, "")
}) })
}) })
t.Run("fails on zero size", func(t *testing.T) { t.Run("fails on zero size", func(t *testing.T) {
require.Panics(t, func() { require.Panics(t, func() {
_ = NewGenerator(vu, 0, "", false) _ = NewGenerator(vu, 0, "")
}) })
}) })
t.Run("creates slice of specified size", func(t *testing.T) { t.Run("creates slice of specified size", func(t *testing.T) {
size := 10 size := 10
g := NewGenerator(vu, size, "", false) g := NewGenerator(vu, size, "")
slice := g.nextSlice() slice := g.nextSlice()
require.Len(t, slice, size) require.Len(t, slice, size)
}) })
t.Run("creates a different slice on each call", func(t *testing.T) { t.Run("creates a different slice on each call", func(t *testing.T) {
g := NewGenerator(vu, 1000, "", false) g := NewGenerator(vu, 1000, "")
slice1 := g.nextSlice() slice1 := g.nextSlice()
slice2 := g.nextSlice() slice2 := g.nextSlice()
// Each slice should be unique (assuming that 1000 random bytes will never coincide // Each slice should be unique (assuming that 1000 random bytes will never coincide
@@ -43,7 +43,7 @@ func TestGenerator(t *testing.T) {
}) })
t.Run("keeps generating slices after consuming entire tail", func(t *testing.T) { t.Run("keeps generating slices after consuming entire tail", func(t *testing.T) {
g := NewGenerator(vu, 1000, "", false) g := NewGenerator(vu, 1000, "")
initialSlice := g.nextSlice() initialSlice := g.nextSlice()
for i := 0; i < TailSize; i++ { for i := 0; i < TailSize; i++ {
g.nextSlice() g.nextSlice()

@@ -1,121 +0,0 @@
package datagen
import (
"bufio"
"bytes"
"crypto/sha256"
"encoding/hex"
"hash"
"io"
"math/rand"
"github.com/go-loremipsum/loremipsum"
)
// Payload represents arbitrary data to be packed into S3 or native object.
// Implementations could be thread-unsafe.
type Payload interface {
// Reader returns io.Reader instance to read the payload.
// Must not be called twice.
Reader() io.Reader
// Bytes is a helper which reads all data from Reader() into slice.
// The sole purpose of this method is to simplify HTTP scenario,
// where all payload needs to be read and wrapped.
Bytes() []byte
// Size returns payload size, which is equal to the total amount of data
// that could be read from the Reader().
Size() int
// Hash returns payload sha256 hash. Must be called after all data is read from the reader.
Hash() string
}
type bytesPayload struct {
data []byte
}
func (p *bytesPayload) Reader() io.Reader {
return bytes.NewReader(p.data)
}
func (p *bytesPayload) Size() int {
return len(p.data)
}
func (p *bytesPayload) Hash() string {
h := sha256.Sum256(p.data[:])
return hex.EncodeToString(h[:])
}
func (p *bytesPayload) Bytes() []byte {
return p.data
}
func NewFixedPayload(data []byte) Payload {
return &bytesPayload{data: data}
}
type randomPayload struct {
r io.Reader
s hash.Hash
h string
size int
}
func NewStreamPayload(size int, seed int64, typ string) Payload {
var rr io.Reader
switch typ {
case "text":
rr = &textReader{li: loremipsum.NewWithSeed(seed)}
default:
rr = rand.New(rand.NewSource(seed))
}
lr := io.LimitReader(rr, int64(size))
// We need some buffering to write complete blocks in the TeeReader.
// Streaming payload read is expected to be used for big objects, thus 4k seems like a good choice.
br := bufio.NewReaderSize(lr, 4096)
s := sha256.New()
tr := io.TeeReader(br, s)
return &randomPayload{
r: tr,
s: s,
size: size,
}
}
func (p *randomPayload) Reader() io.Reader {
return p.r
}
func (p *randomPayload) Size() int {
return p.size
}
func (p *randomPayload) Hash() string {
if p.h == "" {
p.h = hex.EncodeToString(p.s.Sum(nil))
// Prevent possible misuse.
p.r = nil
p.s = nil
}
return p.h
}
func (p *randomPayload) Bytes() []byte {
data, err := io.ReadAll(p.r)
if err != nil {
// We use only 2 readers, either `bytes.Reader` or `rand.Reader`.
// None of them returns errors, thus encountering an error is a fatal error.
panic(err)
}
return data
}
type textReader struct {
li *loremipsum.LoremIpsum
}
func (r *textReader) Read(p []byte) (n int, err error) {
paragraph := r.li.Paragraph()
return copy(p, paragraph), nil
}
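For context, a minimal sketch of how the removed Payload API above is meant to be consumed: per the doc comments, the hash becomes valid only after the reader has been drained. The size and seed values here are arbitrary.

```go
package main

import (
	"fmt"
	"io"

	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
)

func main() {
	// Streaming payload: data is produced lazily and hashed while it is read.
	p := datagen.NewStreamPayload(1024, 1, "text")

	data, err := io.ReadAll(p.Reader())
	if err != nil {
		panic(err)
	}

	// Hash() must be called only after the reader has been fully consumed.
	fmt.Println(len(data), p.Hash())
}
```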

@@ -1,40 +0,0 @@
package datagen
import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"io"
"testing"
"github.com/stretchr/testify/require"
)
func TestFixedPayload(t *testing.T) {
const size = 123
data := make([]byte, size)
_, err := rand.Read(data)
require.NoError(t, err)
p := NewFixedPayload(data)
require.Equal(t, size, p.Size())
actual, err := io.ReadAll(p.Reader())
require.NoError(t, err)
require.Equal(t, data, actual)
h := sha256.Sum256(data)
require.Equal(t, hex.EncodeToString(h[:]), p.Hash())
}
func TestStreamingPayload(t *testing.T) {
const size = 123
p := NewStreamPayload(size, 0, "")
require.Equal(t, size, p.Size())
actual, err := io.ReadAll(p.Reader())
require.NoError(t, err)
require.Equal(t, size, len(actual))
require.Equal(t, sha256.Size*2, len(p.Hash()))
}

@@ -1,50 +0,0 @@
package env
import (
"os"
"github.com/joho/godotenv"
"go.k6.io/k6/js/modules"
)
// RootModule is the global module object type. It is instantiated once per test
// run and will be used to create k6/x/frostfs/registry module instances for each VU.
type RootModule struct{}
// Parser represents an instance of the module for every VU.
type Parser struct {
vu modules.VU
}
// Ensure the interfaces are implemented correctly.
var (
_ modules.Instance = &Parser{}
_ modules.Module = &RootModule{}
)
func init() {
modules.Register("k6/x/frostfs/env", new(RootModule))
}
// NewModuleInstance implements the modules.Module interface and returns
// a new instance for each VU.
func (r *RootModule) NewModuleInstance(vu modules.VU) modules.Instance {
mi := &Parser{vu: vu}
return mi
}
// Exports implements the modules.Instance interface and returns the exports
// of the JS module.
func (p *Parser) Exports() modules.Exports {
return modules.Exports{Default: p}
}
func (p *Parser) Parse(fileName string) (map[string]string, error) {
f, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer f.Close()
return godotenv.Parse(f)
}

@@ -1,84 +0,0 @@
package local
import (
"fmt"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
"go.k6.io/k6/js/modules"
)
type Client struct {
vu modules.VU
rc *rawclient.RawClient
l Limiter
}
type (
SuccessOrErrorResponse struct {
Success bool
Error string
}
PutResponse struct {
Success bool
ObjectID string
Error string
Abort bool
}
GetResponse SuccessOrErrorResponse
DeleteResponse SuccessOrErrorResponse
)
func (c *Client) Put(containerID string, headers map[string]string, payload datagen.Payload) PutResponse {
if c.l.IsFull() {
return PutResponse{
Success: false,
Error: "engine size limit reached",
Abort: true,
}
}
id, err := c.rc.Put(c.vu.Context(), mustParseContainerID(containerID), nil, headers, payload.Bytes())
if err != nil {
return PutResponse{Error: err.Error()}
}
return PutResponse{
Success: true,
ObjectID: id.EncodeToString(),
}
}
func (c *Client) Get(containerID, objectID string) GetResponse {
if _, err := c.rc.Get(c.vu.Context(), mustParseContainerID(containerID), mustParseObjectID(objectID)); err != nil {
return GetResponse{Error: err.Error()}
}
return GetResponse{Success: true}
}
func (c *Client) Delete(containerID, objectID string) DeleteResponse {
if err := c.rc.Delete(c.vu.Context(), mustParseContainerID(containerID), mustParseObjectID(objectID)); err != nil {
return DeleteResponse{Error: err.Error()}
}
return DeleteResponse{Success: true}
}
func mustParseContainerID(strContainerID string) cid.ID {
var containerID cid.ID
err := containerID.DecodeString(strContainerID)
if err != nil {
panic(fmt.Sprintf("parsing container id %q: %v", strContainerID, err))
}
return containerID
}
func mustParseObjectID(strObjectID string) oid.ID {
var cliObjectID oid.ID
err := cliObjectID.DecodeString(strObjectID)
if err != nil {
panic(fmt.Sprintf("parsing object id %q: %v", strObjectID, err))
}
return cliObjectID
}

@@ -1,105 +0,0 @@
package local
import (
"sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)
var (
_ Limiter = &noopLimiter{}
_ Limiter = &sizeLimiter{}
)
type Limiter interface {
engine.MetricRegister
IsFull() bool
}
func NewLimiter(maxSizeGB int64) Limiter {
if maxSizeGB < 0 {
panic("max size is negative")
}
if maxSizeGB == 0 {
return &noopLimiter{}
}
return &sizeLimiter{
maxSize: maxSizeGB * 1024 * 1024 * 1024,
currentSize: &atomic.Int64{},
}
}
type sizeLimiter struct {
maxSize int64
currentSize *atomic.Int64
}
func (*sizeLimiter) AddMethodDuration(method string, d time.Duration) {}
func (*sizeLimiter) AddToContainerSize(cnrID string, size int64) {}
func (*sizeLimiter) AddToObjectCounter(shardID string, objectType string, delta int) {}
func (*sizeLimiter) ClearErrorCounter(shardID string) {}
func (*sizeLimiter) DeleteShardMetrics(shardID string) {}
func (*sizeLimiter) GC() metrics.GCMetrics { return &noopGCMetrics{} }
func (*sizeLimiter) IncErrorCounter(shardID string) {}
func (*sizeLimiter) SetMode(shardID string, mode mode.Mode) {}
func (*sizeLimiter) SetObjectCounter(shardID string, objectType string, v uint64) {}
func (*sizeLimiter) WriteCache() metrics.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
func (*sizeLimiter) DeleteContainerSize(cnrID string) {}
func (*sizeLimiter) DeleteContainerCount(cnrID string) {}
func (*sizeLimiter) SetContainerObjectCounter(_, _, _ string, _ uint64) {}
func (*sizeLimiter) IncContainerObjectCounter(_, _, _ string) {}
func (*sizeLimiter) SubContainerObjectCounter(_, _, _ string, _ uint64) {}
func (*sizeLimiter) IncRefillObjectsCount(_, _ string, _ int, _ bool) {}
func (*sizeLimiter) SetRefillPercent(_, _ string, _ uint32) {}
func (*sizeLimiter) SetRefillStatus(_, _, _ string) {}
func (sl *sizeLimiter) AddToPayloadCounter(shardID string, size int64) {
sl.currentSize.Add(size)
}
func (sl *sizeLimiter) IsFull() bool {
cur := sl.currentSize.Load()
return cur > sl.maxSize
}
type noopLimiter struct{}
func (*noopLimiter) AddMethodDuration(method string, d time.Duration) {}
func (*noopLimiter) AddToContainerSize(cnrID string, size int64) {}
func (*noopLimiter) AddToObjectCounter(shardID string, objectType string, delta int) {}
func (*noopLimiter) AddToPayloadCounter(shardID string, size int64) {}
func (*noopLimiter) ClearErrorCounter(shardID string) {}
func (*noopLimiter) DeleteShardMetrics(shardID string) {}
func (*noopLimiter) GC() metrics.GCMetrics { return &noopGCMetrics{} }
func (*noopLimiter) IncErrorCounter(shardID string) {}
func (*noopLimiter) SetMode(shardID string, mode mode.Mode) {}
func (*noopLimiter) SetObjectCounter(shardID string, objectType string, v uint64) {}
func (*noopLimiter) WriteCache() metrics.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
func (*noopLimiter) IsFull() bool { return false }
func (*noopLimiter) DeleteContainerSize(cnrID string) {}
func (*noopLimiter) DeleteContainerCount(cnrID string) {}
func (*noopLimiter) SetContainerObjectCounter(_, _, _ string, _ uint64) {}
func (*noopLimiter) IncContainerObjectCounter(_, _, _ string) {}
func (*noopLimiter) SubContainerObjectCounter(_, _, _ string, _ uint64) {}
func (*noopLimiter) IncRefillObjectsCount(_, _ string, _ int, _ bool) {}
func (*noopLimiter) SetRefillPercent(_, _ string, _ uint32) {}
func (*noopLimiter) SetRefillStatus(_, _, _ string) {}
type noopGCMetrics struct{}
func (*noopGCMetrics) AddDeletedCount(shardID string, deleted uint64, failed uint64) {}
func (*noopGCMetrics) AddExpiredObjectCollectionDuration(string, time.Duration, bool, string) {}
func (*noopGCMetrics) AddInhumedObjectCount(shardID string, count uint64, objectType string) {}
func (*noopGCMetrics) AddRunDuration(shardID string, d time.Duration, success bool) {}
type noopWriteCacheMetrics struct{}
func (*noopWriteCacheMetrics) AddMethodDuration(_, _, _, _ string, _ bool, _ time.Duration) {}
func (*noopWriteCacheMetrics) Close(_, _ string) {}
func (*noopWriteCacheMetrics) IncOperationCounter(_, _, _, _ string, _ metrics.NullBool) {}
func (*noopWriteCacheMetrics) SetActualCount(_, _, _ string, count uint64) {}
func (*noopWriteCacheMetrics) SetEstimateSize(_, _, _ string, _ uint64) {}
func (*noopWriteCacheMetrics) SetMode(shardID string, mode string) {}
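For context, a minimal sketch of the limiter contract defined above: a size limit in GiB, fed through the `AddToPayloadCounter` metric hook and queried via `IsFull`. It assumes `AddToPayloadCounter` is part of the metrics interface the limiter implements; the shard ID and sizes are arbitrary.

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
)

func main() {
	// A 1 GiB limit; passing 0 would return the no-op limiter, which never reports full.
	l := local.NewLimiter(1)

	// The storage engine reports written payload sizes through this metric hook.
	l.AddToPayloadCounter("shard-1", 2*1024*1024*1024)

	fmt.Println(l.IsFull()) // true: 2 GiB exceeds the configured 1 GiB limit
}
```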

@@ -1,390 +0,0 @@
package local
import (
"context"
"errors"
"fmt"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
metabase "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"go.etcd.io/bbolt"
"go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics"
"go.uber.org/zap"
"golang.org/x/sys/unix"
)
// RootModule is the global module object type. It is instantiated once per test
// run and will be used to create k6/x/frostfs/local module instances for each VU.
type RootModule struct {
mu sync.Mutex
// configFile is the name of the configuration file used during one test.
configFile string
// configDir is the name of the configuration directory used during one test.
configDir string
// ng is the engine instance used during one test, corresponding to the configFile. Each VU
// gets the same engine instance.
ng *engine.StorageEngine
l Limiter
}
// Local represents an instance of the module for every VU.
type Local struct {
vu modules.VU
ResolveEngine func(context.Context, string, string, bool, int64) (*engine.StorageEngine, Limiter, error)
}
// Ensure the interfaces are implemented correctly.
var (
_ modules.Module = &RootModule{}
_ modules.Instance = &Local{}
objPutSuccess, objPutFails, objPutDuration, objPutData *metrics.Metric
objGetSuccess, objGetFails, objGetDuration, objGetData *metrics.Metric
objDeleteSuccess, objDeleteFails, objDeleteDuration *metrics.Metric
)
func init() {
modules.Register("k6/x/frostfs/local", &RootModule{})
}
// NewModuleInstance implements the modules.Module interface and returns
// a new instance for each VU.
func (r *RootModule) NewModuleInstance(vu modules.VU) modules.Instance {
return NewLocalModuleInstance(vu, r.GetOrCreateEngine)
}
func NewLocalModuleInstance(vu modules.VU, resolveEngine func(context.Context, string, string, bool, int64) (*engine.StorageEngine, Limiter, error)) *Local {
return &Local{
vu: vu,
ResolveEngine: resolveEngine,
}
}
// checkResourceLimits checks the current limit on NOFILE.
//
// The usual default is around 1024, which is too low for production clusters, where a value of
// about 65536 is needed to avoid errors from attempting to open too many files.
// This check matters for the local storage engine scenarios, where the user running the scenario is not
// necessarily the service user, for whom the limits are preconfigured correctly.
//
// See: https://k6.io/docs/misc/fine-tuning-os/
func checkResourceLimits() error {
const (
minNofileLimit = 1 << 16
)
rlimit := &unix.Rlimit{}
if err := unix.Getrlimit(unix.RLIMIT_NOFILE, rlimit); err != nil {
return fmt.Errorf("getting resource limits: %v", err)
}
if rlimit.Cur < minNofileLimit {
return fmt.Errorf("nofile limit is too low: %d", rlimit.Cur)
}
return nil
}
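A minimal sketch (not part of this module) of how a wrapper could raise the soft NOFILE limit itself instead of failing, assuming the process is permitted to raise it up to the hard limit; the helper name is hypothetical and it reuses the unix and fmt imports above:
func raiseNofileLimit(target uint64) error {
	var rl unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rl); err != nil {
		return fmt.Errorf("getting resource limits: %v", err)
	}
	if rl.Cur >= target {
		return nil // already high enough
	}
	rl.Cur = target
	if rl.Cur > rl.Max {
		rl.Cur = rl.Max // the soft limit cannot exceed the hard limit without extra privileges
	}
	return unix.Setrlimit(unix.RLIMIT_NOFILE, &rl)
}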
// GetOrCreateEngine returns the current engine instance for the given configuration file or directory,
// creating a new one if none exists. Note that, for the purposes of a test run, configuration
// files are identified by their file name.
func (r *RootModule) GetOrCreateEngine(ctx context.Context, configFile string, configDir string, debug bool, maxSizeGB int64) (*engine.StorageEngine, Limiter, error) {
r.mu.Lock()
defer r.mu.Unlock()
if len(configFile) == 0 && len(configDir) == 0 {
return nil, nil, errors.New("provide configFile or configDir")
}
if r.l == nil {
r.l = NewLimiter(maxSizeGB)
}
// Create and initialize engine for the given configFile if it doesn't exist already
if r.ng == nil {
r.configFile = configFile
r.configDir = configDir
appCfg := config.New(configFile, configDir, "")
ngOpts, shardOpts, err := storageEngineOptionsFromConfig(ctx, appCfg, debug, r.l)
if err != nil {
return nil, nil, fmt.Errorf("creating engine options from config: %v", err)
}
if err := checkResourceLimits(); err != nil {
return nil, nil, err
}
r.ng = engine.New(ngOpts...)
for i, opts := range shardOpts {
if _, err := r.ng.AddShard(ctx, opts...); err != nil {
return nil, nil, fmt.Errorf("adding shard %d: %v", i, err)
}
}
if err := r.ng.Open(ctx); err != nil {
return nil, nil, fmt.Errorf("opening engine: %v", err)
}
if err := r.ng.Init(ctx); err != nil {
return nil, nil, fmt.Errorf("initializing engine: %v", err)
}
} else if configFile != r.configFile {
return nil, nil, fmt.Errorf("GetOrCreateEngine called with mismatching configFile after engine was "+
"initialized: got %q, want %q", configFile, r.configFile)
} else if configDir != r.configDir {
return nil, nil, fmt.Errorf("GetOrCreateEngine called with mismatching configDir after engine was "+
"initialized: got %q, want %q", configDir, r.configDir)
}
return r.ng, r.l, nil
}
// Exports implements the modules.Instance interface and returns the exports
// of the JS module.
func (s *Local) Exports() modules.Exports {
return modules.Exports{Default: s}
}
func (s *Local) VU() modules.VU { return s.vu }
func (s *Local) Connect(configFile, configDir, hexKey string, debug bool, maxSizeGB int64) (*Client, error) {
ng, l, err := s.ResolveEngine(s.VU().Context(), configFile, configDir, debug, maxSizeGB)
if err != nil {
return nil, fmt.Errorf("connecting to engine for config - file %q dir %q: %v", configFile, configDir, err)
}
key, err := ParseOrCreateKey(hexKey)
if err != nil {
return nil, fmt.Errorf("creating key: %v", err)
}
// Register metrics.
objPutSuccess, _ = stats.Registry.NewMetric("local_obj_put_success", metrics.Counter)
objPutFails, _ = stats.Registry.NewMetric("local_obj_put_fails", metrics.Counter)
objPutDuration, _ = stats.Registry.NewMetric("local_obj_put_duration", metrics.Trend, metrics.Time)
objPutData, _ = stats.Registry.NewMetric("local_obj_put_bytes", metrics.Counter, metrics.Data)
objGetSuccess, _ = stats.Registry.NewMetric("local_obj_get_success", metrics.Counter)
objGetFails, _ = stats.Registry.NewMetric("local_obj_get_fails", metrics.Counter)
objGetDuration, _ = stats.Registry.NewMetric("local_obj_get_duration", metrics.Trend, metrics.Time)
objGetData, _ = stats.Registry.NewMetric("local_obj_get_bytes", metrics.Counter, metrics.Data)
objDeleteSuccess, _ = stats.Registry.NewMetric("local_obj_delete_success", metrics.Counter)
objDeleteFails, _ = stats.Registry.NewMetric("local_obj_delete_fails", metrics.Counter)
objDeleteDuration, _ = stats.Registry.NewMetric("local_obj_delete_duration", metrics.Trend, metrics.Time)
// Create raw client backed by local storage engine.
rc := rawclient.New(ng,
rawclient.WithKey(key.PrivateKey),
rawclient.WithPutHandler(func(sz uint64, err error, dt time.Duration) {
if err != nil {
stats.Report(s.vu, objPutFails, 1)
} else {
stats.Report(s.vu, objPutSuccess, 1)
stats.ReportDataSent(s.vu, float64(sz))
stats.Report(s.vu, objPutDuration, metrics.D(dt))
stats.Report(s.vu, objPutData, float64(sz))
}
}),
rawclient.WithGetHandler(func(sz uint64, err error, dt time.Duration) {
if err != nil {
stats.Report(s.vu, objGetFails, 1)
} else {
stats.Report(s.vu, objGetSuccess, 1)
stats.Report(s.vu, objGetDuration, metrics.D(dt))
stats.ReportDataReceived(s.vu, float64(sz))
stats.Report(s.vu, objGetData, float64(sz))
}
}),
rawclient.WithDeleteHandler(func(err error, dt time.Duration) {
if err != nil {
stats.Report(s.vu, objDeleteFails, 1)
} else {
stats.Report(s.vu, objDeleteSuccess, 1)
stats.Report(s.vu, objDeleteDuration, metrics.D(dt))
}
}),
)
return &Client{vu: s.vu, rc: rc, l: l}, nil
}
type epochState struct{}
func (epochState) CurrentEpoch() uint64 { return 0 }
// storageEngineOptionsFromConfig loads a configuration file and returns the corresponding
// engine and shard options to recreate an engine usable with an existing storage instance.
// This makes sure that the local loader uses the same engine configuration as the one that
// preloaded the storage (if any), by using the same configuration file.
//
// Note that the configuration file only needs to contain the storage-specific sections.
func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug bool, l Limiter) ([]engine.Option, [][]shard.Option, error) {
log := zap.L()
if debug {
var err error
log, err = zap.NewDevelopment()
if err != nil {
return nil, nil, fmt.Errorf("creating development logger: %v", err)
}
}
ngOpts := []engine.Option{
engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
engine.WithShardPoolSize(engineconfig.ShardPoolSize(c)),
engine.WithLogger(&logger.Logger{Logger: log}),
engine.WithMetrics(l),
}
var shOpts [][]shard.Option
err := engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
opts := []shard.Option{
shard.WithRefillMetabase(sc.RefillMetabase()),
shard.WithMode(sc.Mode()),
shard.WithLogger(&logger.Logger{Logger: log}),
}
// substorages
{
var substorages []blobstor.SubStorage
for _, scfg := range sc.BlobStor().Storages() {
switch scfg.Type() {
case blobovniczatree.Type:
cfg := blobovniczaconfig.From((*config.Config)(scfg))
ss := blobstor.SubStorage{
Storage: blobovniczatree.NewBlobovniczaTree(
ctx,
blobovniczatree.WithRootPath(scfg.Path()),
blobovniczatree.WithPermissions(scfg.Perm()),
blobovniczatree.WithBlobovniczaSize(cfg.Size()),
blobovniczatree.WithBlobovniczaShallowDepth(cfg.ShallowDepth()),
blobovniczatree.WithBlobovniczaShallowWidth(cfg.ShallowWidth()),
blobovniczatree.WithOpenedCacheSize(cfg.OpenedCacheSize()),
blobovniczatree.WithLogger(&logger.Logger{Logger: log}),
),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) < sc.SmallSizeLimit()
},
}
substorages = append(substorages, ss)
case fstree.Type:
cfg := fstreeconfig.From((*config.Config)(scfg))
ss := blobstor.SubStorage{
Storage: fstree.New(
fstree.WithPath(scfg.Path()),
fstree.WithPerm(scfg.Perm()),
fstree.WithDepth(cfg.Depth()),
fstree.WithNoSync(cfg.NoSync()),
),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return true
},
}
substorages = append(substorages, ss)
default:
return fmt.Errorf("invalid storage type: %s", scfg.Type())
}
}
opts = append(opts, shard.WithBlobStorOptions(
blobstor.WithCompressObjects(sc.Compress()),
blobstor.WithUncompressableContentTypes(sc.UncompressableContentTypes()),
blobstor.WithStorages(substorages),
blobstor.WithLogger(&logger.Logger{Logger: log}),
))
}
// write cache
if wc := sc.WriteCache(); wc.Enabled() {
opts = append(opts,
shard.WithWriteCache(true),
shard.WithWriteCacheOptions(
[]writecache.Option{
writecache.WithPath(wc.Path()),
writecache.WithMaxBatchSize(wc.BoltDB().MaxBatchSize()),
writecache.WithMaxBatchDelay(wc.BoltDB().MaxBatchDelay()),
writecache.WithMaxObjectSize(wc.MaxObjectSize()),
writecache.WithSmallObjectSize(wc.SmallObjectSize()),
writecache.WithFlushWorkersCount(wc.WorkerCount()),
writecache.WithMaxCacheSize(wc.SizeLimit()),
writecache.WithNoSync(wc.NoSync()),
writecache.WithLogger(&logger.Logger{Logger: log}),
},
),
)
}
// tree
if config.BoolSafe(c.Sub("tree"), "enabled") {
pr := sc.Pilorama()
opts = append(opts, shard.WithPiloramaOptions(
pilorama.WithPath(pr.Path()),
pilorama.WithPerm(pr.Perm()),
pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
pilorama.WithNoSync(pr.NoSync()),
))
}
// metabase
{
mb := sc.Metabase()
opts = append(opts, shard.WithMetaBaseOptions(
metabase.WithPath(mb.Path()),
metabase.WithPermissions(mb.BoltDB().Perm()),
metabase.WithMaxBatchSize(mb.BoltDB().MaxBatchSize()),
metabase.WithMaxBatchDelay(mb.BoltDB().MaxBatchDelay()),
metabase.WithBoltDBOptions(&bbolt.Options{
Timeout: 1 * time.Second,
}),
metabase.WithEpochState(epochState{}),
metabase.WithLogger(&logger.Logger{Logger: log}),
))
}
// GC
{
gc := sc.GC()
opts = append(opts,
shard.WithGCRemoverSleepInterval(gc.RemoverSleepInterval()),
shard.WithRemoverBatchSize(gc.RemoverBatchSize()),
shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
pool, err := ants.NewPool(sz)
if err != nil {
panic(err)
}
return pool
}),
)
}
shOpts = append(shOpts, opts)
return nil
})
if err != nil {
return nil, nil, fmt.Errorf("iterate shards: %w", err)
}
return ngOpts, shOpts, nil
}
// ParseOrCreateKey parses the provided key as a hex string or creates a fresh one if empty.
func ParseOrCreateKey(hexKeyStr string) (*keys.PrivateKey, error) {
if hexKeyStr != "" {
return keys.NewPrivateKeyFromHex(hexKeyStr)
}
return keys.NewPrivateKey()
}

View file

@ -1,46 +0,0 @@
package rawclient
import (
"crypto/ecdsa"
"time"
)
type (
PutHandler func(uint64, error, time.Duration)
GetHandler func(uint64, error, time.Duration)
DeleteHandler func(error, time.Duration)
)
type config struct {
key ecdsa.PrivateKey
onPut PutHandler
onGet GetHandler
onDelete DeleteHandler
}
type Option func(*config)
func defaultConfig() *config {
return &config{
onPut: func(uint64, error, time.Duration) {},
onGet: func(uint64, error, time.Duration) {},
onDelete: func(error, time.Duration) {},
}
}
// WithKey sets the private key used by the raw client if no other key
// is available when setting owner IDs.
// Required.
func WithKey(key ecdsa.PrivateKey) Option { return func(c *config) { c.key = key } }
// WithPutHandler sets the hook invoked on completion of Put calls.
// This is useful for updating metrics or debugging.
func WithPutHandler(h PutHandler) Option { return func(c *config) { c.onPut = h } }
// WithGetHandler sets the hook invoked on completion of Get calls.
// This is useful for updating metrics or debugging.
func WithGetHandler(h GetHandler) Option { return func(c *config) { c.onGet = h } }
// WithDeleteHandler sets the hook invoked on completion of Delete calls.
// This is useful for updating metrics or debugging.
func WithDeleteHandler(h DeleteHandler) Option { return func(c *config) { c.onDelete = h } }

View file

@ -1,121 +0,0 @@
// Package rawclient provides a basic interface to the local storage engine.
// It can be used as a base for more complex load clients backed by local storage.
package rawclient
import (
"context"
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
// RawClient is a client to the local storage engine instance.
type RawClient struct {
*config
ng *engine.StorageEngine
ownerID *user.ID
}
// New returns a RawClient from the provided options.
func New(ng *engine.StorageEngine, opts ...Option) *RawClient {
cfg := defaultConfig()
for _, opt := range opts {
opt(cfg)
}
client := &RawClient{cfg, ng, &user.ID{}}
user.IDFromKey(client.ownerID, client.key.PublicKey)
return client
}
func (c *RawClient) Put(ctx context.Context, containerID cid.ID, ownerID *user.ID, headers map[string]string, payload []byte) (oid.ID, error) {
sz := len(payload)
attrs := make([]object.Attribute, len(headers))
{
ind := 0
for k, v := range headers {
attrs[ind].SetKey(k)
attrs[ind].SetValue(v)
ind++
}
}
// Note that the key is a required option, so this is never empty.
if ownerID == nil {
ownerID = c.ownerID
}
obj := object.New()
obj.SetContainerID(containerID)
obj.SetOwnerID(*ownerID)
obj.SetAttributes(attrs...)
obj.SetPayload(payload)
obj.SetPayloadSize(uint64(sz))
object.CalculateAndSetPayloadChecksum(obj) // needed for metabase key
id, err := object.CalculateID(obj)
if err != nil {
return oid.ID{}, fmt.Errorf("calculating object id: %v", err)
}
obj.SetID(id)
if err := object.CalculateAndSetSignature(c.key, obj); err != nil {
return oid.ID{}, fmt.Errorf("calculating signature: %v", err)
}
var req engine.PutPrm
req.WithObject(obj)
start := time.Now()
err = c.ng.Put(ctx, req)
c.onPut(uint64(sz), err, time.Since(start))
if err != nil {
return oid.ID{}, err
}
return id, nil
}
func (c *RawClient) Get(ctx context.Context, containerID cid.ID, objectID oid.ID) (*object.Object, error) {
var addr oid.Address
addr.SetContainer(containerID)
addr.SetObject(objectID)
var req engine.GetPrm
req.WithAddress(addr)
start := time.Now()
res, err := c.ng.Get(ctx, req)
var sz uint64
obj := res.Object()
if obj != nil {
sz = uint64(len(obj.Payload()))
}
c.onGet(sz, err, time.Since(start))
return obj, err
}
func (c *RawClient) Delete(ctx context.Context, containerID cid.ID, objectID oid.ID) error {
var addr oid.Address
addr.SetContainer(containerID)
addr.SetObject(objectID)
var req engine.DeletePrm
req.WithAddress(addr)
start := time.Now()
_, err := c.ng.Delete(ctx, req)
c.onDelete(err, time.Since(start))
return err
}
func (c *RawClient) OwnerID() *user.ID {
return c.ownerID
}
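For orientation, a hedged usage sketch of the raw client from within the same package; the engine ng, the context ctx and the container ID cnr are assumed to already exist, and the neo-go keys package is assumed to be imported:
key, _ := keys.NewPrivateKey()
rc := New(ng, WithKey(key.PrivateKey))
id, err := rc.Put(ctx, cnr, nil, map[string]string{"FileName": "sample"}, []byte("payload"))
if err == nil {
	if obj, err := rc.Get(ctx, cnr, id); err == nil {
		_ = obj.Payload() // payload of the freshly stored object
	}
	_ = rc.Delete(ctx, cnr, id)
}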

View file

@ -1,58 +0,0 @@
package native
import (
"context"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
const networkCacheTTL = time.Minute
var networkInfoCache = &networkInfoCacheT{}
type networkInfoCacheT struct {
guard sync.RWMutex
current *netmap.NetworkInfo
fetchTS time.Time
}
func (c *networkInfoCacheT) getOrFetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
if v := c.get(); v != nil {
return v, nil
}
return c.fetch(ctx, cli)
}
func (c *networkInfoCacheT) get() *netmap.NetworkInfo {
c.guard.RLock()
defer c.guard.RUnlock()
if c.current == nil || time.Since(c.fetchTS) > networkCacheTTL {
return nil
}
return c.current
}
func (c *networkInfoCacheT) fetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
c.guard.Lock()
defer c.guard.Unlock()
if time.Since(c.fetchTS) <= networkCacheTTL {
return c.current, nil
}
res, err := cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
v := res.Info()
c.current = &v
c.fetchTS = time.Now()
return c.current, nil
}
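Note on the pattern above: get takes only a read lock for the fast path, while fetch re-checks the TTL under the write lock, so several VUs that miss the cache at the same moment trigger a single NetworkInfo call instead of one each.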

View file

@ -1,6 +1,7 @@
package native package native
import ( import (
"bytes"
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/sha256" "crypto/sha256"
@ -22,20 +23,19 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"git.frostfs.info/TrueCloudLab/tzhash/tz" "git.frostfs.info/TrueCloudLab/tzhash/tz"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats" "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/dop251/goja"
"go.k6.io/k6/js/modules" "go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics" "go.k6.io/k6/metrics"
) )
type ( type (
Client struct { Client struct {
vu modules.VU vu modules.VU
key ecdsa.PrivateKey key ecdsa.PrivateKey
tok session.Object tok session.Object
cli *client.Client cli *client.Client
prepareLocally bool bufsize int
maxObjSize uint64
} }
PutResponse struct { PutResponse struct {
@ -66,19 +66,30 @@ type (
} }
PreparedObject struct { PreparedObject struct {
vu modules.VU vu modules.VU
key ecdsa.PrivateKey key ecdsa.PrivateKey
cli *client.Client cli *client.Client
hdr object.Object bufsize int
payload []byte
prepareLocally bool hdr object.Object
maxObjSize uint64 payload []byte
} }
) )
const defaultBufferSize = 64 * 1024 const defaultBufferSize = 64 * 1024
func (c *Client) Put(containerID string, headers map[string]string, payload datagen.Payload, chunkSize int) PutResponse { func (c *Client) SetBufferSize(size int) {
if size < 0 {
panic("buffer size must be positive")
}
if size == 0 {
c.bufsize = defaultBufferSize
} else {
c.bufsize = size
}
}
func (c *Client) Put(containerID string, headers map[string]string, payload goja.ArrayBuffer) PutResponse {
cliContainerID := parseContainerID(containerID) cliContainerID := parseContainerID(containerID)
tok := c.tok tok := c.tok
@ -102,10 +113,10 @@ func (c *Client) Put(containerID string, headers map[string]string, payload data
var o object.Object var o object.Object
o.SetContainerID(cliContainerID) o.SetContainerID(cliContainerID)
o.SetOwnerID(owner) o.SetOwnerID(&owner)
o.SetAttributes(attrs...) o.SetAttributes(attrs...)
resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize, c.maxObjSize) resp, err := put(c.vu, c.bufsize, c.cli, &tok, &o, payload.Bytes())
if err != nil { if err != nil {
return PutResponse{Success: false, Error: err.Error()} return PutResponse{Success: false, Error: err.Error()}
} }
@ -129,9 +140,9 @@ func (c *Client) Delete(containerID string, objectID string) DeleteResponse {
start := time.Now() start := time.Now()
var prm client.PrmObjectDelete var prm client.PrmObjectDelete
prm.ObjectID = &cliObjectID prm.ByID(cliObjectID)
prm.ContainerID = &cliContainerID prm.FromContainer(cliContainerID)
prm.Session = &tok prm.WithinSession(tok)
_, err = c.cli.ObjectDelete(c.vu.Context(), prm) _, err = c.cli.ObjectDelete(c.vu.Context(), prm)
if err != nil { if err != nil {
@ -139,7 +150,7 @@ func (c *Client) Delete(containerID string, objectID string) DeleteResponse {
return DeleteResponse{Success: false, Error: err.Error()} return DeleteResponse{Success: false, Error: err.Error()}
} }
stats.Report(c.vu, objDeleteSuccess, 1) stats.Report(c.vu, objDeleteTotal, 1)
stats.Report(c.vu, objDeleteDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objDeleteDuration, metrics.D(time.Since(start)))
return DeleteResponse{Success: true} return DeleteResponse{Success: true}
} }
@ -160,12 +171,12 @@ func (c *Client) Get(containerID, objectID string) GetResponse {
start := time.Now() start := time.Now()
var prm client.PrmObjectGet var prm client.PrmObjectGet
prm.ObjectID = &cliObjectID prm.ByID(cliObjectID)
prm.ContainerID = &cliContainerID prm.FromContainer(cliContainerID)
prm.Session = &tok prm.WithinSession(tok)
objSize := 0 var objSize = 0
err = get(c.cli, prm, c.vu.Context(), func(data []byte) { err = get(c.cli, prm, c.vu.Context(), c.bufsize, func(data []byte) {
objSize += len(data) objSize += len(data)
}) })
if err != nil { if err != nil {
@ -173,10 +184,9 @@ func (c *Client) Get(containerID, objectID string) GetResponse {
return GetResponse{Success: false, Error: err.Error()} return GetResponse{Success: false, Error: err.Error()}
} }
stats.Report(c.vu, objGetSuccess, 1) stats.Report(c.vu, objGetTotal, 1)
stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start)))
stats.ReportDataReceived(c.vu, float64(objSize)) stats.ReportDataReceived(c.vu, float64(objSize))
stats.Report(c.vu, objGetData, float64(objSize))
return GetResponse{Success: true} return GetResponse{Success: true}
} }
@ -184,9 +194,10 @@ func get(
cli *client.Client, cli *client.Client,
prm client.PrmObjectGet, prm client.PrmObjectGet,
ctx context.Context, ctx context.Context,
bufSize int,
onDataChunk func(chunk []byte), onDataChunk func(chunk []byte),
) error { ) error {
buf := make([]byte, defaultBufferSize) var buf = make([]byte, bufSize)
objectReader, err := cli.ObjectGetInit(ctx, prm) objectReader, err := cli.ObjectGetInit(ctx, prm)
if err != nil { if err != nil {
@ -229,12 +240,12 @@ func (c *Client) VerifyHash(containerID, objectID, expectedHash string) VerifyHa
} }
var prm client.PrmObjectGet var prm client.PrmObjectGet
prm.ObjectID = &cliObjectID prm.ByID(cliObjectID)
prm.ContainerID = &cliContainerID prm.FromContainer(cliContainerID)
prm.Session = &tok prm.WithinSession(tok)
hasher := sha256.New() hasher := sha256.New()
err = get(c.cli, prm, c.vu.Context(), func(data []byte) { err = get(c.cli, prm, c.vu.Context(), c.bufsize, func(data []byte) {
hasher.Write(data) hasher.Write(data)
}) })
if err != nil { if err != nil {
@ -242,7 +253,7 @@ func (c *Client) VerifyHash(containerID, objectID, expectedHash string) VerifyHa
} }
actualHash := hex.EncodeToString(hasher.Sum(nil)) actualHash := hex.EncodeToString(hasher.Sum(nil))
if actualHash != expectedHash { if actualHash != expectedHash {
return VerifyHashResponse{Success: false, Error: "hash mismatch"} return VerifyHashResponse{Success: true, Error: "hash mismatch"}
} }
return VerifyHashResponse{Success: true} return VerifyHashResponse{Success: true}
@ -311,9 +322,10 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
} }
start := time.Now() start := time.Now()
res, err := c.cli.ContainerPut(c.vu.Context(), client.PrmContainerPut{ var prm client.PrmContainerPut
Container: &cnr, prm.SetContainer(cnr)
})
res, err := c.cli.ContainerPut(c.vu.Context(), prm)
if err != nil { if err != nil {
return c.putCnrErrorResponse(err) return c.putCnrErrorResponse(err)
} }
@ -329,7 +341,7 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
return PutContainerResponse{Success: true, ContainerID: res.ID().EncodeToString()} return PutContainerResponse{Success: true, ContainerID: res.ID().EncodeToString()}
} }
func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObject { func (c *Client) Onsite(containerID string, payload goja.ArrayBuffer) PreparedObject {
maxObjectSize, epoch, hhDisabled, err := parseNetworkInfo(c.vu.Context(), c.cli) maxObjectSize, epoch, hhDisabled, err := parseNetworkInfo(c.vu.Context(), c.cli)
if err != nil { if err != nil {
panic(err) panic(err)
@ -356,7 +368,7 @@ func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObj
obj.SetVersion(&apiVersion) obj.SetVersion(&apiVersion)
obj.SetType(object.TypeRegular) obj.SetType(object.TypeRegular)
obj.SetContainerID(cliContainerID) obj.SetContainerID(cliContainerID)
obj.SetOwnerID(owner) obj.SetOwnerID(&owner)
obj.SetPayloadSize(uint64(ln)) obj.SetPayloadSize(uint64(ln))
obj.SetCreationEpoch(epoch) obj.SetCreationEpoch(epoch)
@ -369,13 +381,13 @@ func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObj
} }
return PreparedObject{ return PreparedObject{
vu: c.vu, vu: c.vu,
key: c.key, key: c.key,
cli: c.cli, cli: c.cli,
hdr: *obj, bufsize: c.bufsize,
payload: data,
prepareLocally: c.prepareLocally, hdr: *obj,
maxObjSize: c.maxObjSize, payload: data,
} }
} }
@ -401,7 +413,7 @@ func (p PreparedObject) Put(headers map[string]string) PutResponse {
return PutResponse{Success: false, Error: err.Error()} return PutResponse{Success: false, Error: err.Error()}
} }
_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0, p.maxObjSize) _, err = put(p.vu, p.bufsize, p.cli, nil, &obj, p.payload)
if err != nil { if err != nil {
return PutResponse{Success: false, Error: err.Error()} return PutResponse{Success: false, Error: err.Error()}
} }
@ -409,44 +421,18 @@ func (p PreparedObject) Put(headers map[string]string) PutResponse {
return PutResponse{Success: true, ObjectID: id.String()} return PutResponse{Success: true, ObjectID: id.String()}
} }
type epochSource uint64 func put(vu modules.VU, bufSize int, cli *client.Client, tok *session.Object,
hdr *object.Object, payload []byte) (*client.ResObjectPut, error) {
func (s epochSource) CurrentEpoch() uint64 {
return uint64(s)
}
func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Object,
hdr *object.Object, payload datagen.Payload, chunkSize int, maxObjSize uint64,
) (*client.ResObjectPut, error) {
bufSize := defaultBufferSize
if chunkSize > 0 {
bufSize = chunkSize
}
buf := make([]byte, bufSize) buf := make([]byte, bufSize)
rdr := payload.Reader() rdr := bytes.NewReader(payload)
sz := payload.Size() sz := rdr.Size()
// starting upload // starting upload
start := time.Now() start := time.Now()
var prm client.PrmObjectPutInit var prm client.PrmObjectPutInit
if tok != nil { if tok != nil {
prm.Session = tok prm.WithinSession(*tok)
}
if chunkSize > 0 {
prm.MaxChunkLength = chunkSize
}
if prepareLocally {
ni, err := networkInfoCache.getOrFetch(vu.Context(), cli)
if err != nil {
return nil, err
}
prm.MaxSize = ni.MaxObjectSize()
prm.EpochSource = epochSource(ni.CurrentEpoch())
prm.WithoutHomomorphHash = true
if maxObjSize > 0 {
prm.MaxSize = maxObjSize
}
} }
objectWriter, err := cli.ObjectPutInit(vu.Context(), prm) objectWriter, err := cli.ObjectPutInit(vu.Context(), prm)
@ -455,30 +441,29 @@ func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Ob
return nil, err return nil, err
} }
if !objectWriter.WriteHeader(vu.Context(), *hdr) { if !objectWriter.WriteHeader(*hdr) {
stats.Report(vu, objPutFails, 1) stats.Report(vu, objPutFails, 1)
_, err = objectWriter.Close(vu.Context()) _, err = objectWriter.Close()
return nil, err return nil, err
} }
n, _ := rdr.Read(buf) n, _ := rdr.Read(buf)
for n > 0 { for n > 0 {
if !objectWriter.WritePayloadChunk(vu.Context(), buf[:n]) { if !objectWriter.WritePayloadChunk(buf[:n]) {
break break
} }
n, _ = rdr.Read(buf) n, _ = rdr.Read(buf)
} }
resp, err := objectWriter.Close(vu.Context()) resp, err := objectWriter.Close()
if err != nil { if err != nil {
stats.Report(vu, objPutFails, 1) stats.Report(vu, objPutFails, 1)
return nil, err return nil, err
} }
stats.Report(vu, objPutSuccess, 1) stats.Report(vu, objPutTotal, 1)
stats.ReportDataSent(vu, float64(sz)) stats.ReportDataSent(vu, float64(sz))
stats.Report(vu, objPutDuration, metrics.D(time.Since(start))) stats.Report(vu, objPutDuration, metrics.D(time.Since(start)))
stats.Report(vu, objPutData, float64(sz))
return resp, nil return resp, nil
} }
@ -506,9 +491,10 @@ func (x *waitParams) setDefaults() {
func (c *Client) waitForContainerPresence(ctx context.Context, cnrID cid.ID, wp *waitParams) error { func (c *Client) waitForContainerPresence(ctx context.Context, cnrID cid.ID, wp *waitParams) error {
return waitFor(ctx, wp, func(ctx context.Context) bool { return waitFor(ctx, wp, func(ctx context.Context) bool {
_, err := c.cli.ContainerGet(ctx, client.PrmContainerGet{ var prm client.PrmContainerGet
ContainerID: &cnrID, prm.SetContainer(cnrID)
})
_, err := c.cli.ContainerGet(ctx, prm)
return err == nil return err == nil
}) })
} }

View file

@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.k6.io/k6/js/modules" "go.k6.io/k6/js/modules"
@ -29,10 +28,10 @@ var (
_ modules.Instance = &Native{} _ modules.Instance = &Native{}
_ modules.Module = &RootModule{} _ modules.Module = &RootModule{}
objPutSuccess, objPutFails, objPutDuration, objPutData *metrics.Metric objPutTotal, objPutFails, objPutDuration *metrics.Metric
objGetSuccess, objGetFails, objGetDuration, objGetData *metrics.Metric objGetTotal, objGetFails, objGetDuration *metrics.Metric
objDeleteSuccess, objDeleteFails, objDeleteDuration *metrics.Metric objDeleteTotal, objDeleteFails, objDeleteDuration *metrics.Metric
cnrPutTotal, cnrPutFails, cnrPutDuration *metrics.Metric cnrPutTotal, cnrPutFails, cnrPutDuration *metrics.Metric
) )
func init() { func init() {
@ -52,17 +51,13 @@ func (n *Native) Exports() modules.Exports {
return modules.Exports{Default: n} return modules.Exports{Default: n}
} }
func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool, maxObjSize int) (*Client, error) { func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int) (*Client, error) {
var ( var (
cli client.Client cli client.Client
pk *keys.PrivateKey pk *keys.PrivateKey
err error err error
) )
if maxObjSize < 0 {
return nil, fmt.Errorf("max object size value must be positive")
}
pk, err = keys.NewPrivateKey() pk, err = keys.NewPrivateKey()
if len(hexPrivateKey) != 0 { if len(hexPrivateKey) != 0 {
pk, err = keys.NewPrivateKeyFromHex(hexPrivateKey) pk, err = keys.NewPrivateKeyFromHex(hexPrivateKey)
@ -72,30 +67,31 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
} }
var prmInit client.PrmInit var prmInit client.PrmInit
prmInit.Key = pk.PrivateKey prmInit.ResolveFrostFSFailures()
prmInit.SetDefaultPrivateKey(pk.PrivateKey)
cli.Init(prmInit) cli.Init(prmInit)
var prmDial client.PrmDial var prmDial client.PrmDial
prmDial.Endpoint = endpoint prmDial.SetServerURI(endpoint)
if dialTimeout > 0 { if dialTimeout > 0 {
prmDial.DialTimeout = time.Duration(dialTimeout) * time.Second prmDial.SetTimeout(time.Duration(dialTimeout) * time.Second)
} }
if streamTimeout > 0 { if streamTimeout > 0 {
prmDial.StreamTimeout = time.Duration(streamTimeout) * time.Second prmDial.SetStreamTimeout(time.Duration(streamTimeout) * time.Second)
} }
err = cli.Dial(n.vu.Context(), prmDial) err = cli.Dial(prmDial)
if err != nil { if err != nil {
return nil, fmt.Errorf("dial endpoint: %s %w", endpoint, err) return nil, fmt.Errorf("dial endpoint: %s %w", endpoint, err)
} }
// generate session token // generate session token
exp := uint64(math.MaxUint64) exp := uint64(math.MaxUint64)
sessionResp, err := cli.SessionCreate(n.vu.Context(), client.PrmSessionCreate{ var prmSessionCreate client.PrmSessionCreate
Expiration: exp, prmSessionCreate.SetExp(exp)
}) sessionResp, err := cli.SessionCreate(n.vu.Context(), prmSessionCreate)
if err != nil { if err != nil {
return nil, fmt.Errorf("dial endpoint: %s %w", endpoint, err) return nil, fmt.Errorf("dial endpoint: %s %w", endpoint, err)
} }
@ -118,42 +114,29 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
tok.SetAuthKey(&key) tok.SetAuthKey(&key)
tok.SetExp(exp) tok.SetExp(exp)
if prepareLocally && maxObjSize > 0 {
res, err := cli.NetworkInfo(n.vu.Context(), client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
if uint64(maxObjSize) > res.Info().MaxObjectSize() {
return nil, fmt.Errorf("max object size must be not greater than %d bytes", res.Info().MaxObjectSize())
}
}
// register metrics // register metrics
registry := metrics.NewRegistry()
objPutTotal, _ = registry.NewMetric("frostfs_obj_put_total", metrics.Counter)
objPutFails, _ = registry.NewMetric("frostfs_obj_put_fails", metrics.Counter)
objPutDuration, _ = registry.NewMetric("frostfs_obj_put_duration", metrics.Trend, metrics.Time)
objPutSuccess, _ = stats.Registry.NewMetric("frostfs_obj_put_success", metrics.Counter) objGetTotal, _ = registry.NewMetric("frostfs_obj_get_total", metrics.Counter)
objPutFails, _ = stats.Registry.NewMetric("frostfs_obj_put_fails", metrics.Counter) objGetFails, _ = registry.NewMetric("frostfs_obj_get_fails", metrics.Counter)
objPutDuration, _ = stats.Registry.NewMetric("frostfs_obj_put_duration", metrics.Trend, metrics.Time) objGetDuration, _ = registry.NewMetric("frostfs_obj_get_duration", metrics.Trend, metrics.Time)
objPutData, _ = stats.Registry.NewMetric("frostfs_obj_put_bytes", metrics.Counter, metrics.Data)
objGetSuccess, _ = stats.Registry.NewMetric("frostfs_obj_get_success", metrics.Counter) objDeleteTotal, _ = registry.NewMetric("frostfs_obj_delete_total", metrics.Counter)
objGetFails, _ = stats.Registry.NewMetric("frostfs_obj_get_fails", metrics.Counter) objDeleteFails, _ = registry.NewMetric("frostfs_obj_delete_fails", metrics.Counter)
objGetDuration, _ = stats.Registry.NewMetric("frostfs_obj_get_duration", metrics.Trend, metrics.Time) objDeleteDuration, _ = registry.NewMetric("frostfs_obj_delete_duration", metrics.Trend, metrics.Time)
objGetData, _ = stats.Registry.NewMetric("frostfs_obj_get_bytes", metrics.Counter, metrics.Data)
objDeleteSuccess, _ = stats.Registry.NewMetric("frostfs_obj_delete_success", metrics.Counter) cnrPutTotal, _ = registry.NewMetric("frostfs_cnr_put_total", metrics.Counter)
objDeleteFails, _ = stats.Registry.NewMetric("frostfs_obj_delete_fails", metrics.Counter) cnrPutFails, _ = registry.NewMetric("frostfs_cnr_put_fails", metrics.Counter)
objDeleteDuration, _ = stats.Registry.NewMetric("frostfs_obj_delete_duration", metrics.Trend, metrics.Time) cnrPutDuration, _ = registry.NewMetric("frostfs_cnr_put_duration", metrics.Trend, metrics.Time)
cnrPutTotal, _ = stats.Registry.NewMetric("frostfs_cnr_put_total", metrics.Counter)
cnrPutFails, _ = stats.Registry.NewMetric("frostfs_cnr_put_fails", metrics.Counter)
cnrPutDuration, _ = stats.Registry.NewMetric("frostfs_cnr_put_duration", metrics.Trend, metrics.Time)
return &Client{ return &Client{
vu: n.vu, vu: n.vu,
key: pk.PrivateKey, key: pk.PrivateKey,
tok: tok, tok: tok,
cli: &cli, cli: &cli,
prepareLocally: prepareLocally, bufsize: defaultBufferSize,
maxObjSize: uint64(maxObjSize),
}, nil }, nil
} }

View file

@ -1,67 +0,0 @@
// Package profile provides an extension to generate profile data from k6 itself.
//
// An Output extension is used to leverage the Start and Stop hooks which are
// otherwise inaccessible in a regular module.
package profile
import (
"fmt"
"os"
"runtime"
"runtime/pprof"
"go.k6.io/k6/metrics"
"go.k6.io/k6/output"
)
const (
cpuProfilePath = "cpu.prof"
memProfilePath = "mem.prof"
)
type profExt struct {
cpuFile *os.File
}
func New(output.Params) (output.Output, error) {
return &profExt{}, nil
}
func (*profExt) Description() string {
return "profile"
}
func (ext *profExt) Start() error {
var err error
ext.cpuFile, err = os.Create(cpuProfilePath)
if err != nil {
return fmt.Errorf("creating cpu profile file: %v", err)
}
if err := pprof.StartCPUProfile(ext.cpuFile); err != nil {
return fmt.Errorf("starting cpu profile: %v", err)
}
return nil
}
func (ext *profExt) Stop() error {
pprof.StopCPUProfile()
if err := ext.cpuFile.Close(); err != nil {
return fmt.Errorf("closing cpu profile file: %v", err)
}
f, err := os.Create(memProfilePath)
if err != nil {
return fmt.Errorf("creating mem profile file: %v", err)
}
defer f.Close()
runtime.GC()
if err := pprof.WriteHeapProfile(f); err != nil {
return fmt.Errorf("writing mem profile: %v", err)
}
return nil
}
func (*profExt) AddMetricSamples([]metrics.SampleContainer) {}
func init() {
output.RegisterExtension("profile", New)
}
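Assuming the extension is compiled into the k6 binary (for example via xk6), it is presumably enabled like any other registered output, e.g. "k6 run -o profile script.js"; cpu.prof and mem.prof then appear in the working directory and can be inspected with "go tool pprof".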

View file

@ -1,124 +0,0 @@
package registry
import (
"fmt"
"os"
)
type ObjExporter struct {
selector *ObjSelector
}
type PreGenerateInfo struct {
Buckets []string `json:"buckets"`
Containers []string `json:"containers"`
Objects []ObjInfo `json:"objects"`
ObjSize string `json:"obj_size"`
}
type ObjInfo struct {
Bucket string `json:"bucket"`
Object string `json:"object"`
CID string `json:"cid"`
OID string `json:"oid"`
}
func NewObjExporter(selector *ObjSelector) *ObjExporter {
return &ObjExporter{selector: selector}
}
func (o *ObjExporter) ExportJSONPreGen(fileName string) error {
f, err := os.Create(fileName)
if err != nil {
return err
}
defer f.Close()
// there can be a lot of objects, so form the JSON manually
if _, err = f.WriteString(`{"objects":[`); err != nil {
return err
}
bucketMap := make(map[string]struct{})
containerMap := make(map[string]struct{})
count, err := o.selector.Count()
if err != nil {
return err
}
var comma string
for i := 0; i < count; i++ {
info := o.selector.NextObject()
if info == nil {
break
}
if err = writeObjectInfo(comma, info, f); err != nil {
return err
}
if i == 0 {
comma = ","
}
if info.S3Bucket != "" {
bucketMap[info.S3Bucket] = struct{}{}
}
if info.CID != "" {
containerMap[info.CID] = struct{}{}
}
}
if _, err = f.WriteString(`]`); err != nil {
return err
}
if len(bucketMap) > 0 {
if err = writeContainerInfo("buckets", bucketMap, f); err != nil {
return err
}
}
if len(containerMap) > 0 {
if err = writeContainerInfo("containers", containerMap, f); err != nil {
return err
}
}
if _, err = f.WriteString(`}`); err != nil {
return err
}
return nil
}
func writeObjectInfo(comma string, info *ObjectInfo, f *os.File) (err error) {
var res string
if info.S3Bucket != "" || info.S3Key != "" {
res = fmt.Sprintf(`%s{"bucket":"%s","object":"%s"}`, comma, info.S3Bucket, info.S3Key)
} else {
res = fmt.Sprintf(`%s{"cid":"%s","oid":"%s"}`, comma, info.CID, info.OID)
}
_, err = f.WriteString(res)
return err
}
func writeContainerInfo(attrName string, bucketMap map[string]struct{}, f *os.File) (err error) {
if _, err = f.WriteString(fmt.Sprintf(`,"%s":[`, attrName)); err != nil {
return err
}
i := 0
comma := ""
for bucket := range bucketMap {
if _, err = f.WriteString(fmt.Sprintf(`%s"%s"`, comma, bucket)); err != nil {
return err
}
if i == 0 {
comma = ","
}
i++
}
_, err = f.WriteString(`]`)
return err
}
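For reference, a file exported for gRPC-style objects has roughly this shape (illustrative identifiers):
{"objects":[{"cid":"CID1","oid":"OID1"},{"cid":"CID1","oid":"OID2"}],"containers":["CID1"]}
S3-style objects are written with "bucket" and "object" keys instead, and a "buckets" array is appended in the same way as "containers".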

View file

@ -1,156 +0,0 @@
package registry
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"os"
"path/filepath"
"slices"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
type expectedResult struct {
mode string
objects []ObjectInfo
dir string
dbName string
jsonName string
}
func TestObjectExporter(t *testing.T) {
names := []string{"s3", "grpc"}
for _, name := range names {
t.Run(name, runExportTest)
t.Run(name+"-changed", runExportChangedTest)
t.Run(name+"-empty", runExportEmptyTest)
}
}
func runExportTest(t *testing.T) {
expected := getExpectedResult(t)
objReg := getFilledRegistry(t, expected)
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(expected.objects, expected.jsonName))
}
func runExportChangedTest(t *testing.T) {
expected := getExpectedResult(t)
objReg := getFilledRegistry(t, expected)
newStatus := randString(10)
num := randPositiveInt(1, len(expected.objects))
changedObjects := make([]ObjectInfo, num)
require.Equal(t, num, copy(changedObjects[:], expected.objects[:]))
sel := NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated})
for i := range changedObjects {
changedObjects[i].Status = newStatus
require.NoError(t, objReg.SetObjectStatus(sel.NextObject().Id, statusCreated, newStatus))
}
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: newStatus}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(changedObjects, expected.jsonName))
}
func runExportEmptyTest(t *testing.T) {
expected := getExpectedResult(t)
expected.objects = make([]ObjectInfo, 0)
objReg := getFilledRegistry(t, expected)
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(expected.objects, expected.jsonName))
}
func getExpectedResult(t *testing.T) expectedResult {
num := randPositiveInt(2, 100)
mode := getMode(t.Name())
require.NotEqual(t, "", mode, "test mode should contain either \"s3\" or \"grpc\"")
dir := t.TempDir()
res := expectedResult{
mode: mode,
objects: generateObjectInfo(num, t.Name()),
dir: dir,
dbName: filepath.Join(dir, "registry-"+mode+".db"),
jsonName: filepath.Join(dir, "registry-"+mode+".json"),
}
return res
}
func randPositiveInt(min, max int) int {
return rand.Intn(max-min) + min
}
func getMode(name string) (res string) {
if strings.Contains(name, "s3") {
res = filepath.Base(name)
}
if strings.Contains(name, "grpc") {
res = filepath.Base(name)
}
return res
}
func generateObjectInfo(num int, mode string) []ObjectInfo {
res := make([]ObjectInfo, num)
for i := range res {
res[i] = randomObjectInfo()
if !strings.Contains(mode, "s3") {
res[i].S3Bucket = ""
res[i].S3Key = ""
}
if !strings.Contains(mode, "grpc") {
res[i].CID = ""
res[i].OID = ""
}
}
return res
}
func getFilledRegistry(t *testing.T, expected expectedResult) *ObjRegistry {
objReg := NewObjRegistry(context.Background(), expected.dbName)
for i := range expected.objects {
require.NoError(t, objReg.AddObject(expected.objects[i].CID, expected.objects[i].OID, expected.objects[i].S3Bucket, expected.objects[i].S3Key, expected.objects[i].PayloadHash))
}
return objReg
}
func checkExported(expected []ObjectInfo, fileName string) error {
file, err := os.ReadFile(fileName)
if err != nil {
return err
}
if !json.Valid(file) {
return fmt.Errorf("exported json file %s is invalid", fileName)
}
var actual PreGenerateInfo
if err := json.Unmarshal(file, &actual); err != nil {
return err
}
if len(expected) != len(actual.Objects) {
return fmt.Errorf("expected len(): %v, got len(): %v", len(expected), len(actual.Objects))
}
for i := range expected {
if !slices.ContainsFunc(actual.Objects, func(oi ObjInfo) bool {
compareS3 := oi.Bucket == expected[i].S3Bucket && oi.Object == expected[i].S3Key
comparegRPC := oi.CID == expected[i].CID && oi.OID == expected[i].OID
return compareS3 && comparegRPC
}) {
return fmt.Errorf("object %v not found in exported json file %s", expected[i], fileName)
}
}
return nil
}

View file

@ -1,61 +0,0 @@
package registry
import (
"github.com/nspcc-dev/neo-go/pkg/io"
)
// ObjectInfo represents information about FrostFS object that has been created
// via gRPC/HTTP/S3 API.
type ObjectInfo struct {
Id uint64 // Identifier in bolt DB
CreatedAt int64 // UTC seconds from epoch when the object was created
CID string // Container ID in gRPC/HTTP
OID string // Object ID in gRPC/HTTP
S3Bucket string // Bucket name in S3
S3Key string // Object key in S3
Status string // Status of the object
PayloadHash string // SHA256 hash of object payload that can be used for verification
}
func (o ObjectInfo) EncodeBinary(w *io.BinWriter) {
o.encodeFilterableFields(w)
w.WriteU64LE(o.Id)
w.WriteString(o.CID)
w.WriteString(o.OID)
w.WriteString(o.S3Bucket)
w.WriteString(o.S3Key)
w.WriteString(o.PayloadHash)
}
func (o ObjectInfo) encodeFilterableFields(w *io.BinWriter) {
w.WriteU64LE(uint64(o.CreatedAt))
w.WriteString(o.Status)
}
func (o *ObjectInfo) DecodeBinary(r *io.BinReader) {
o.decodeFilterableFields(r)
o.Id = r.ReadU64LE()
o.CID = r.ReadString()
o.OID = r.ReadString()
o.S3Bucket = r.ReadString()
o.S3Key = r.ReadString()
o.PayloadHash = r.ReadString()
}
func (o *ObjectInfo) decodeFilterableFields(r *io.BinReader) {
o.CreatedAt = int64(r.ReadU64LE())
o.Status = r.ReadString()
}
func (o ObjectInfo) Marshal() ([]byte, error) {
w := io.NewBufBinWriter()
o.EncodeBinary(w.BinWriter)
err := w.Err // Bytes() sets Err to ErrDrained
return w.Bytes(), err
}
func (o *ObjectInfo) Unmarshal(data []byte) error {
r := io.NewBinReaderFromBuf(data)
o.DecodeBinary(r)
return r.Err
}

View file

@ -1,107 +0,0 @@
package registry
import (
"encoding/json"
"math/rand"
"strings"
"testing"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/stretchr/testify/require"
)
func BenchmarkObjectInfoMarshal(b *testing.B) {
obj := randomObjectInfo()
b.Run("json", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := json.Marshal(obj)
if err != nil {
b.FailNow()
}
}
})
b.Run("native", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := obj.Marshal()
if err != nil {
b.FailNow()
}
}
})
}
func BenchmarkObjectInfoUnmarshal(b *testing.B) {
obj := randomObjectInfo()
b.Run("json", func(b *testing.B) {
data, err := json.Marshal(obj)
require.NoError(b, err)
for i := 0; i < b.N; i++ {
var obj ObjectInfo
err := json.Unmarshal(data, &obj)
if err != nil {
b.FailNow()
}
}
})
b.Run("native", func(b *testing.B) {
data, err := obj.Marshal()
require.NoError(b, err)
for i := 0; i < b.N; i++ {
err := obj.Unmarshal(data)
if err != nil {
b.FailNow()
}
}
})
}
func TestObjectInfoMarshal(t *testing.T) {
expected := randomObjectInfo()
data, err := expected.Marshal()
require.NoError(t, err)
var actual ObjectInfo
require.NoError(t, actual.Unmarshal(data))
require.Equal(t, expected, actual)
}
func TestObjectInfoEncodeBinary(t *testing.T) {
expected := randomObjectInfo()
w := io.NewBufBinWriter()
expected.EncodeBinary(w.BinWriter)
require.NoError(t, w.Err)
data := w.Bytes()
r := io.NewBinReaderFromBuf(data)
var actual ObjectInfo
actual.DecodeBinary(r)
require.NoError(t, r.Err)
require.Equal(t, expected, actual)
}
func randomObjectInfo() ObjectInfo {
return ObjectInfo{
CreatedAt: int64(rand.Uint64()),
Status: statusCreated,
Id: rand.Uint64(),
CID: randString(32),
OID: randString(32),
S3Bucket: randString(32),
S3Key: randString(32),
PayloadHash: randString(64),
}
}
func randString(n int) string {
var sb strings.Builder
for i := 0; i < n; i++ {
sb.WriteRune('a' + rune(rand.Int31())%('z'-'a'+1))
}
return sb.String()
}

View file

@ -3,8 +3,8 @@ package registry
import ( import (
"context" "context"
"encoding/binary" "encoding/binary"
"encoding/json"
"errors" "errors"
"fmt"
"os" "os"
"time" "time"
@ -22,6 +22,21 @@ const (
statusCreated = "created" statusCreated = "created"
) )
const bucketName = "_object"
// ObjectInfo represents information about FrostFS object that has been created
// via gRPC/HTTP/S3 API.
type ObjectInfo struct {
Id uint64 // Identifier in bolt DB
CreatedAt time.Time // UTC date&time when the object was created
CID string // Container ID in gRPC/HTTP
OID string // Object ID in gRPC/HTTP
S3Bucket string // Bucket name in S3
S3Key string // Object key in S3
Status string // Status of the object
PayloadHash string // SHA256 hash of object payload that can be used for verification
}
// NewObjRegistry creates a new instance of object registry that stores information // NewObjRegistry creates a new instance of object registry that stores information
// about objects in the specified bolt database. As registry uses read-write // about objects in the specified bolt database. As registry uses read-write
// connection to the database, there may be only one instance of object registry // connection to the database, there may be only one instance of object registry
@ -44,8 +59,8 @@ func NewObjRegistry(ctx context.Context, dbFilePath string) *ObjRegistry {
} }
func (o *ObjRegistry) AddObject(cid, oid, s3Bucket, s3Key, payloadHash string) error { func (o *ObjRegistry) AddObject(cid, oid, s3Bucket, s3Key, payloadHash string) error {
return o.boltDB.Batch(func(tx *bbolt.Tx) error { return o.boltDB.Update(func(tx *bbolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(statusCreated)) b, err := tx.CreateBucketIfNotExists([]byte(bucketName))
if err != nil { if err != nil {
return err return err
} }
@ -57,7 +72,7 @@ func (o *ObjRegistry) AddObject(cid, oid, s3Bucket, s3Key, payloadHash string) e
object := ObjectInfo{ object := ObjectInfo{
Id: id, Id: id,
CreatedAt: time.Now().UTC().Unix(), CreatedAt: time.Now().UTC(),
CID: cid, CID: cid,
OID: oid, OID: oid,
S3Bucket: s3Bucket, S3Bucket: s3Bucket,
@ -65,55 +80,49 @@ func (o *ObjRegistry) AddObject(cid, oid, s3Bucket, s3Key, payloadHash string) e
PayloadHash: payloadHash, PayloadHash: payloadHash,
Status: statusCreated, Status: statusCreated,
} }
objBytes, err := object.Marshal() objectJson, err := json.Marshal(object)
if err != nil { if err != nil {
return err return err
} }
return b.Put(encodeId(id), objBytes) return b.Put(encodeId(id), objectJson)
}) })
} }
func (o *ObjRegistry) SetObjectStatus(id uint64, oldStatus, newStatus string) error { func (o *ObjRegistry) SetObjectStatus(id uint64, newStatus string) error {
return o.boltDB.Batch(func(tx *bbolt.Tx) error { return o.boltDB.Update(func(tx *bbolt.Tx) error {
oldB := tx.Bucket([]byte(oldStatus)) b, err := tx.CreateBucketIfNotExists([]byte(bucketName))
if oldB == nil { if err != nil {
return fmt.Errorf("bucket doesn't exist: '%s'", oldStatus) return err
} }
key := encodeId(id) objBytes := b.Get(encodeId(id))
objBytes := oldB.Get(key)
if objBytes == nil { if objBytes == nil {
return errors.New("object doesn't exist") return errors.New("object doesn't exist")
} }
if err := oldB.Delete(key); err != nil {
return fmt.Errorf("bucket.Delete: %w", err)
}
obj := new(ObjectInfo) obj := new(ObjectInfo)
if err := obj.Unmarshal(objBytes); err != nil { if err := json.Unmarshal(objBytes, &obj); err != nil {
return err return err
} }
obj.Status = newStatus obj.Status = newStatus
objBytes, err := obj.Marshal() objBytes, err = json.Marshal(obj)
if err != nil { if err != nil {
return err return err
} }
return b.Put(encodeId(id), objBytes)
newB, err := tx.CreateBucketIfNotExists([]byte(newStatus))
if err != nil {
return err
}
return newB.Put(encodeId(id), objBytes)
}) })
} }
func (o *ObjRegistry) DeleteObject(id uint64) error { func (o *ObjRegistry) DeleteObject(id uint64) error {
return o.boltDB.Batch(func(tx *bbolt.Tx) error { return o.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.ForEach(func(_ []byte, b *bbolt.Bucket) error { b, err := tx.CreateBucketIfNotExists([]byte(bucketName))
return b.Delete(encodeId(id)) if err != nil {
}) return err
}
return b.Delete(encodeId(id))
}) })
} }

View file

@ -2,16 +2,13 @@ package registry
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"sync"
"time" "time"
"github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt" "go.etcd.io/bbolt"
) )
const nextObjectTimeout = 10 * time.Second
type ObjFilter struct { type ObjFilter struct {
Status string Status string
Age int Age int
@ -23,9 +20,6 @@ type ObjSelector struct {
boltDB *bbolt.DB boltDB *bbolt.DB
filter *ObjFilter filter *ObjFilter
cacheSize int cacheSize int
kind SelectorKind
// Sync synchronizes VU used for deletion.
Sync sync.WaitGroup
} }
// objectSelectCache is the default maximum size of a batch to select from DB. // objectSelectCache is the default maximum size of a batch to select from DB.
@ -33,20 +27,16 @@ const objectSelectCache = 1000
// NewObjSelector creates a new instance of object selector that can iterate over // NewObjSelector creates a new instance of object selector that can iterate over
// objects in the specified registry. // objects in the specified registry.
func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind, filter *ObjFilter) *ObjSelector { func NewObjSelector(registry *ObjRegistry, selectionSize int, filter *ObjFilter) *ObjSelector {
if selectionSize <= 0 { if selectionSize <= 0 {
selectionSize = objectSelectCache selectionSize = objectSelectCache
} }
if filter == nil || filter.Status == "" {
panic("filtering without status is not supported")
}
objSelector := &ObjSelector{ objSelector := &ObjSelector{
ctx: registry.ctx, ctx: registry.ctx,
boltDB: registry.boltDB, boltDB: registry.boltDB,
filter: filter, filter: filter,
objChan: make(chan *ObjectInfo, selectionSize*2), objChan: make(chan *ObjectInfo, selectionSize*2),
cacheSize: selectionSize, cacheSize: selectionSize,
kind: kind,
} }
go objSelector.selectLoop() go objSelector.selectLoop()
@ -62,39 +52,22 @@ func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind,
// - underlying registry context is done, nil objects will be returned on the // - underlying registry context is done, nil objects will be returned on the
// currently blocked and every further NextObject calls. // currently blocked and every further NextObject calls.
func (o *ObjSelector) NextObject() *ObjectInfo { func (o *ObjSelector) NextObject() *ObjectInfo {
if o.kind == SelectorOneshot { return <-o.objChan
return <-o.objChan
}
select {
case <-time.After(nextObjectTimeout):
return nil
case obj := <-o.objChan:
return obj
}
} }
// Count returns total number of objects that match filter of the selector. // Count returns total number of objects that match filter of the selector.
func (o *ObjSelector) Count() (int, error) { func (o *ObjSelector) Count() (int, error) {
count := 0 var count = 0
err := o.boltDB.View(func(tx *bbolt.Tx) error { err := o.boltDB.View(func(tx *bbolt.Tx) error {
b := tx.Bucket([]byte(o.filter.Status)) b := tx.Bucket([]byte(bucketName))
if b == nil { if b == nil {
return nil return nil
} }
if o.filter.Age == 0 {
count = b.Stats().KeyN
return nil
}
return b.ForEach(func(_, objBytes []byte) error { return b.ForEach(func(_, objBytes []byte) error {
if objBytes != nil { if objBytes != nil {
r := io.NewBinReaderFromBuf(objBytes)
var obj ObjectInfo var obj ObjectInfo
obj.decodeFilterableFields(r) if err := json.Unmarshal(objBytes, &obj); err != nil {
if r.Err != nil {
// Ignore malformed objects // Ignore malformed objects
return nil return nil
} }
@ -122,7 +95,7 @@ func (o *ObjSelector) selectLoop() {
// cache the objects // cache the objects
err := o.boltDB.View(func(tx *bbolt.Tx) error { err := o.boltDB.View(func(tx *bbolt.Tx) error {
b := tx.Bucket([]byte(o.filter.Status)) b := tx.Bucket([]byte(bucketName))
if b == nil { if b == nil {
return nil return nil
} }
@ -147,7 +120,7 @@ func (o *ObjSelector) selectLoop() {
for ; keyBytes != nil && len(cache) != o.cacheSize; keyBytes, objBytes = c.Next() { for ; keyBytes != nil && len(cache) != o.cacheSize; keyBytes, objBytes = c.Next() {
if objBytes != nil { if objBytes != nil {
var obj ObjectInfo var obj ObjectInfo
if err := obj.Unmarshal(objBytes); err != nil { if err := json.Unmarshal(objBytes, &obj); err != nil {
// Ignore malformed objects for now. Maybe it should panic? // Ignore malformed objects for now. Maybe it should panic?
continue continue
} }
@ -176,23 +149,15 @@ func (o *ObjSelector) selectLoop() {
} }
} }
if o.kind == SelectorOneshot && len(cache) != o.cacheSize { if len(cache) != o.cacheSize {
return
}
if o.kind != SelectorLooped && len(cache) != o.cacheSize {
// no more objects, wait a little; the logic could be improved. // no more objects, wait a little; the logic could be improved.
select { select {
case <-time.After(time.Second): case <-time.After(time.Second * time.Duration(o.filter.Age/2)):
case <-o.ctx.Done(): case <-o.ctx.Done():
return return
} }
} }
if o.kind == SelectorLooped && len(cache) != o.cacheSize {
lastID = 0
}
// clean handled objects // clean handled objects
cache = cache[:0] cache = cache[:0]
} }
@ -203,8 +168,8 @@ func (f *ObjFilter) match(o ObjectInfo) bool {
return false return false
} }
if f.Age != 0 { if f.Age != 0 {
objAge := time.Now().UTC().Unix() - o.CreatedAt objAge := time.Now().UTC().Sub(o.CreatedAt).Seconds()
if objAge < int64(f.Age) { if objAge < float64(f.Age) {
return false return false
} }
} }


@@ -74,35 +74,7 @@ func (r *Registry) open(dbFilePath string) *ObjRegistry {
return registry return registry
} }
// SelectorKind represents selector behaviour when no items are available.
type SelectorKind byte
const (
// SelectorAwaiting waits for a new item to arrive.
// This selector visits each item exactly once and can be used when items
// to select are being pushed into registry concurrently.
SelectorAwaiting = iota
// SelectorLooped rewinds cursor to the start after all items have been read.
// It can encounter duplicates and should be used mostly for read scenarios.
SelectorLooped
// SelectorOneshot visits each item exactly once and exits immediately afterwards.
// It may be used to artificially abort the test after all items were processed.
SelectorOneshot
)
func (r *Registry) GetSelector(dbFilePath string, name string, cacheSize int, filter map[string]string) *ObjSelector { func (r *Registry) GetSelector(dbFilePath string, name string, cacheSize int, filter map[string]string) *ObjSelector {
return r.getSelectorInternal(dbFilePath, name, cacheSize, SelectorAwaiting, filter)
}
func (r *Registry) GetLoopedSelector(dbFilePath string, name string, cacheSize int, filter map[string]string) *ObjSelector {
return r.getSelectorInternal(dbFilePath, name, cacheSize, SelectorLooped, filter)
}
func (r *Registry) GetOneshotSelector(dbFilePath string, name string, cacheSize int, filter map[string]string) *ObjSelector {
return r.getSelectorInternal(dbFilePath, name, cacheSize, SelectorOneshot, filter)
}
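
These three wrappers are what the scenarios call; the selector name acts as a singleton key per registry, and reusing a name with a different filter panics (see getSelectorInternal below). A hedged sketch of picking a kind from JavaScript, with illustrative names and filter values:

import registry from 'k6/x/frostfs/registry';

const filter = { status: 'created', age: 10 };

// Visits each object once and waits for new ones; safe while writers keep
// registering objects concurrently.
const deleting = registry.getSelector(__ENV.REGISTRY_FILE, 'obj_to_delete', 0, filter);

// Rewinds to the start after the last object; duplicates are acceptable for
// read-only load.
const reading = registry.getLoopedSelector(__ENV.REGISTRY_FILE, 'obj_to_read', 0, filter);

// Visits each object once; the delete scenario pairs it with exec.test.abort
// when nextObject() comes back empty.
const draining = registry.getOneshotSelector(__ENV.REGISTRY_FILE, 'obj_to_drain', 0, filter);
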
func (r *Registry) getSelectorInternal(dbFilePath string, name string, cacheSize int, kind SelectorKind, filter map[string]string) *ObjSelector {
objFilter, err := parseFilter(filter) objFilter, err := parseFilter(filter)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -114,7 +86,7 @@ func (r *Registry) getSelectorInternal(dbFilePath string, name string, cacheSize
selector := r.root.selectors[name] selector := r.root.selectors[name]
if selector == nil { if selector == nil {
registry := r.open(dbFilePath) registry := r.open(dbFilePath)
selector = NewObjSelector(registry, cacheSize, kind, objFilter) selector = NewObjSelector(registry, cacheSize, objFilter)
r.root.selectors[name] = selector r.root.selectors[name] = selector
} else if !reflect.DeepEqual(selector.filter, objFilter) { } else if !reflect.DeepEqual(selector.filter, objFilter) {
panic(fmt.Sprintf("selector %s already has been created with a different filter", name)) panic(fmt.Sprintf("selector %s already has been created with a different filter", name))
@@ -122,10 +94,6 @@ func (r *Registry) getSelectorInternal(dbFilePath string, name string, cacheSize
return selector return selector
} }
func (r *Registry) GetExporter(selector *ObjSelector) *ObjExporter {
return NewObjExporter(selector)
}
func parseFilter(filter map[string]string) (*ObjFilter, error) { func parseFilter(filter map[string]string) (*ObjFilter, error) {
objFilter := ObjFilter{} objFilter := ObjFilter{}
objFilter.Status = filter["status"] objFilter.Status = filter["status"]


@@ -1,19 +1,18 @@
package s3 package s3
import ( import (
"bytes"
"context" "context"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"fmt"
"strconv" "strconv"
"time" "time"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats" "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/dop251/goja"
"go.k6.io/k6/js/modules" "go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics" "go.k6.io/k6/metrics"
) )
@@ -50,9 +49,9 @@ type (
} }
) )
func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse { func (c *Client) Put(bucket, key string, payload goja.ArrayBuffer) PutResponse {
rdr := payload.Reader() rdr := bytes.NewReader(payload.Bytes())
sz := payload.Size() sz := rdr.Size()
start := time.Now() start := time.Now()
_, err := c.cli.PutObject(c.vu.Context(), &s3.PutObjectInput{ _, err := c.cli.PutObject(c.vu.Context(), &s3.PutObjectInput{
@@ -65,44 +64,9 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
return PutResponse{Success: false, Error: err.Error()} return PutResponse{Success: false, Error: err.Error()}
} }
stats.Report(c.vu, objPutSuccess, 1) stats.Report(c.vu, objPutTotal, 1)
stats.ReportDataSent(c.vu, float64(sz)) stats.ReportDataSent(c.vu, float64(sz))
stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start)))
stats.Report(c.vu, objPutData, float64(sz))
return PutResponse{Success: true}
}
const multipartUploadMinPartSize = 5 * 1024 * 1024 // 5MB
func (c *Client) Multipart(bucket, key string, objPartSize, concurrency int, payload datagen.Payload) PutResponse {
if objPartSize < multipartUploadMinPartSize {
stats.Report(c.vu, objPutFails, 1)
return PutResponse{Success: false, Error: fmt.Sprintf("part size '%d' must be greater than '%d'(5 MB)", objPartSize, multipartUploadMinPartSize)}
}
start := time.Now()
uploader := manager.NewUploader(c.cli, func(u *manager.Uploader) {
u.PartSize = int64(objPartSize)
u.Concurrency = concurrency
})
payloadReader := payload.Reader()
sz := payload.Size()
_, err := uploader.Upload(c.vu.Context(), &s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
Body: payloadReader,
})
if err != nil {
stats.Report(c.vu, objPutFails, 1)
return PutResponse{Success: false, Error: err.Error()}
}
stats.Report(c.vu, objPutSuccess, 1)
stats.ReportDataSent(c.vu, float64(sz))
stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start)))
stats.Report(c.vu, objPutData, float64(sz))
return PutResponse{Success: true} return PutResponse{Success: true}
} }
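
The new Multipart path rejects part sizes below the S3 minimum of 5 MB and hands the payload to the AWS SDK uploader with the requested part size and concurrency. A rough scenario-side sketch; the lower-cased multipart binding, the S3_ENDPOINTS variable and the bucket, key, part size and concurrency values are assumptions, while newGenerator and genPayload come from the scenario helpers shown later:

import s3 from 'k6/x/frostfs/s3';
import { newGenerator } from './libs/datagen.js';

const client = s3.connect(__ENV.S3_ENDPOINTS.split(',')[0], {});
const generator = newGenerator(true); // same helper the scenarios use; true because this script writes

export default function () {
  const payload = generator.genPayload();
  // The part size must be at least 5 MB (5 * 1024 * 1024), otherwise the call
  // fails before anything is uploaded; four parts are sent concurrently here.
  const resp = client.multipart('test-bucket', 'big-object', 8 * 1024 * 1024, 4, payload);
  if (!resp.success) {
    console.error(resp.error);
  }
}
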
@@ -118,7 +82,7 @@ func (c *Client) Delete(bucket, key string) DeleteResponse {
return DeleteResponse{Success: false, Error: err.Error()} return DeleteResponse{Success: false, Error: err.Error()}
} }
stats.Report(c.vu, objDeleteSuccess, 1) stats.Report(c.vu, objDeleteTotal, 1)
stats.Report(c.vu, objDeleteDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objDeleteDuration, metrics.D(time.Since(start)))
return DeleteResponse{Success: true} return DeleteResponse{Success: true}
} }
@@ -126,7 +90,7 @@ func (c *Client) Delete(bucket, key string) DeleteResponse {
func (c *Client) Get(bucket, key string) GetResponse { func (c *Client) Get(bucket, key string) GetResponse {
start := time.Now() start := time.Now()
objSize := 0 var objSize = 0
err := get(c.cli, bucket, key, func(chunk []byte) { err := get(c.cli, bucket, key, func(chunk []byte) {
objSize += len(chunk) objSize += len(chunk)
}) })
@@ -135,77 +99,12 @@ func (c *Client) Get(bucket, key string) GetResponse {
return GetResponse{Success: false, Error: err.Error()} return GetResponse{Success: false, Error: err.Error()}
} }
stats.Report(c.vu, objGetSuccess, 1) stats.Report(c.vu, objGetTotal, 1)
stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start))) stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start)))
stats.ReportDataReceived(c.vu, float64(objSize)) stats.ReportDataReceived(c.vu, float64(objSize))
stats.Report(c.vu, objGetData, float64(objSize))
return GetResponse{Success: true} return GetResponse{Success: true}
} }
// DeleteObjectVersion deletes object version with specified versionID.
// If version argument is empty, deletes all versions and delete-markers of specified object.
func (c *Client) DeleteObjectVersion(bucket, key, version string) DeleteResponse {
var toDelete []types.ObjectIdentifier
if version != "" {
toDelete = append(toDelete, types.ObjectIdentifier{
Key: aws.String(key),
VersionId: aws.String(version),
})
} else {
versions, err := c.cli.ListObjectVersions(c.vu.Context(), &s3.ListObjectVersionsInput{
Bucket: aws.String(bucket),
Prefix: aws.String(key),
})
if err != nil {
stats.Report(c.vu, objDeleteFails, 1)
return DeleteResponse{Success: false, Error: err.Error()}
}
toDelete = filterObjectVersions(versions, key)
}
if len(toDelete) == 0 {
return c.Delete(bucket, key)
} else {
_, err := c.cli.DeleteObjects(c.vu.Context(), &s3.DeleteObjectsInput{
Bucket: aws.String(bucket),
Delete: &types.Delete{
Objects: toDelete,
Quiet: true,
},
})
if err != nil {
stats.Report(c.vu, objDeleteFails, 1)
return DeleteResponse{Success: false, Error: err.Error()}
}
}
return DeleteResponse{Success: true}
}
func filterObjectVersions(versions *s3.ListObjectVersionsOutput, key string) []types.ObjectIdentifier {
var result []types.ObjectIdentifier
for _, v := range versions.Versions {
if *v.Key == key {
result = append(result, types.ObjectIdentifier{
Key: v.Key,
VersionId: v.VersionId,
})
}
}
for _, marker := range versions.DeleteMarkers {
if *marker.Key == key {
result = append(result, types.ObjectIdentifier{
Key: marker.Key,
VersionId: marker.VersionId,
})
}
}
return result
}
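
DeleteObjectVersion removes a single version when a version ID is supplied; with an empty version it lists every version and delete marker for the key and removes them in one DeleteObjects call, falling back to a plain Delete when nothing is listed. A hedged usage sketch, where the deleteObjectVersion binding, the endpoint variable and the bucket, key and version values are all assumptions:

import s3 from 'k6/x/frostfs/s3';

const client = s3.connect(__ENV.S3_ENDPOINTS.split(',')[0], {});

export default function () {
  // Remove one specific version of the object.
  client.deleteObjectVersion('test-bucket', 'test-key', 'some-version-id');

  // Remove every version and delete marker the key still has.
  client.deleteObjectVersion('test-bucket', 'test-key', '');
}
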
func get( func get(
c *s3.Client, c *s3.Client,
bucket string, bucket string,
@@ -244,7 +143,7 @@ func (c *Client) VerifyHash(bucket, key, expectedHash string) VerifyHashResponse
} }
actualHash := hex.EncodeToString(hasher.Sum(nil)) actualHash := hex.EncodeToString(hasher.Sum(nil))
if actualHash != expectedHash { if actualHash != expectedHash {
return VerifyHashResponse{Success: false, Error: "hash mismatch"} return VerifyHashResponse{Success: true, Error: "hash mismatch"}
} }
return VerifyHashResponse{Success: true} return VerifyHashResponse{Success: true}
@@ -279,27 +178,7 @@ func (c *Client) CreateBucket(bucket string, params map[string]string) CreateBuc
return CreateBucketResponse{Success: false, Error: err.Error()} return CreateBucketResponse{Success: false, Error: err.Error()}
} }
var versioning bool stats.Report(c.vu, createBucketTotal, 1)
if strVersioned, ok := params["versioning"]; ok {
if versioning, err = strconv.ParseBool(strVersioned); err != nil {
stats.Report(c.vu, createBucketFails, 1)
return CreateBucketResponse{Success: false, Error: err.Error()}
}
}
if versioning {
_, err = c.cli.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
Bucket: aws.String(bucket),
VersioningConfiguration: &types.VersioningConfiguration{
Status: types.BucketVersioningStatusEnabled,
},
})
if err != nil {
stats.Report(c.vu, createBucketFails, 1)
return CreateBucketResponse{Success: false, Error: err.Error()}
}
}
stats.Report(c.vu, createBucketSuccess, 1)
stats.Report(c.vu, createBucketDuration, metrics.D(time.Since(start))) stats.Report(c.vu, createBucketDuration, metrics.D(time.Since(start)))
return CreateBucketResponse{Success: true} return CreateBucketResponse{Success: true}
} }
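
CreateBucket now also understands an optional "versioning" entry in its params map: the value goes through strconv.ParseBool, and a true result enables versioning on the freshly created bucket via PutBucketVersioning. A short sketch of passing the flag from a scenario, with the createBucket binding and endpoint variable assumed as before:

import s3 from 'k6/x/frostfs/s3';

const client = s3.connect(__ENV.S3_ENDPOINTS.split(',')[0], {});

export default function () {
  const resp = client.createBucket('versioned-bucket', {
    versioning: 'true', // any value strconv.ParseBool rejects fails the call
  });
  if (!resp.success) {
    console.error(resp.error);
  }
}
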


@@ -7,7 +7,6 @@ import (
"strconv" "strconv"
"time" "time"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3"
@@ -29,10 +28,10 @@ var (
_ modules.Instance = &S3{} _ modules.Instance = &S3{}
_ modules.Module = &RootModule{} _ modules.Module = &RootModule{}
objPutSuccess, objPutFails, objPutDuration, objPutData *metrics.Metric objPutTotal, objPutFails, objPutDuration *metrics.Metric
objGetSuccess, objGetFails, objGetDuration, objGetData *metrics.Metric objGetTotal, objGetFails, objGetDuration *metrics.Metric
objDeleteSuccess, objDeleteFails, objDeleteDuration *metrics.Metric objDeleteTotal, objDeleteFails, objDeleteDuration *metrics.Metric
createBucketSuccess, createBucketFails, createBucketDuration *metrics.Metric createBucketTotal, createBucketFails, createBucketDuration *metrics.Metric
) )
func init() { func init() {
@@ -95,23 +94,22 @@ func (s *S3) Connect(endpoint string, params map[string]string) (*Client, error)
}) })
// register metrics // register metrics
objPutSuccess, _ = stats.Registry.NewMetric("aws_obj_put_success", metrics.Counter) registry := metrics.NewRegistry()
objPutFails, _ = stats.Registry.NewMetric("aws_obj_put_fails", metrics.Counter) objPutTotal, _ = registry.NewMetric("aws_obj_put_total", metrics.Counter)
objPutDuration, _ = stats.Registry.NewMetric("aws_obj_put_duration", metrics.Trend, metrics.Time) objPutFails, _ = registry.NewMetric("aws_obj_put_fails", metrics.Counter)
objPutData, _ = stats.Registry.NewMetric("aws_obj_put_bytes", metrics.Counter, metrics.Data) objPutDuration, _ = registry.NewMetric("aws_obj_put_duration", metrics.Trend, metrics.Time)
objGetSuccess, _ = stats.Registry.NewMetric("aws_obj_get_success", metrics.Counter) objGetTotal, _ = registry.NewMetric("aws_obj_get_total", metrics.Counter)
objGetFails, _ = stats.Registry.NewMetric("aws_obj_get_fails", metrics.Counter) objGetFails, _ = registry.NewMetric("aws_obj_get_fails", metrics.Counter)
objGetDuration, _ = stats.Registry.NewMetric("aws_obj_get_duration", metrics.Trend, metrics.Time) objGetDuration, _ = registry.NewMetric("aws_obj_get_duration", metrics.Trend, metrics.Time)
objGetData, _ = stats.Registry.NewMetric("aws_obj_get_bytes", metrics.Counter, metrics.Data)
objDeleteSuccess, _ = stats.Registry.NewMetric("aws_obj_delete_success", metrics.Counter) objDeleteTotal, _ = registry.NewMetric("aws_obj_delete_total", metrics.Counter)
objDeleteFails, _ = stats.Registry.NewMetric("aws_obj_delete_fails", metrics.Counter) objDeleteFails, _ = registry.NewMetric("aws_obj_delete_fails", metrics.Counter)
objDeleteDuration, _ = stats.Registry.NewMetric("aws_obj_delete_duration", metrics.Trend, metrics.Time) objDeleteDuration, _ = registry.NewMetric("aws_obj_delete_duration", metrics.Trend, metrics.Time)
createBucketSuccess, _ = stats.Registry.NewMetric("aws_create_bucket_success", metrics.Counter) createBucketTotal, _ = registry.NewMetric("aws_create_bucket_total", metrics.Counter)
createBucketFails, _ = stats.Registry.NewMetric("aws_create_bucket_fails", metrics.Counter) createBucketFails, _ = registry.NewMetric("aws_create_bucket_fails", metrics.Counter)
createBucketDuration, _ = stats.Registry.NewMetric("aws_create_bucket_duration", metrics.Trend, metrics.Time) createBucketDuration, _ = registry.NewMetric("aws_create_bucket_duration", metrics.Trend, metrics.Time)
return &Client{ return &Client{
vu: s.vu, vu: s.vu,


@@ -1,136 +0,0 @@
package s3local
import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics"
)
type Client struct {
vu modules.VU
l layer.Client
ownerID *user.ID
resolver layer.BucketResolver
limiter local.Limiter
}
type (
SuccessOrErrorResponse struct {
Success bool
Abort bool
Error string
}
CreateBucketResponse SuccessOrErrorResponse
PutResponse SuccessOrErrorResponse
DeleteResponse SuccessOrErrorResponse
GetResponse SuccessOrErrorResponse
)
func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
if c.limiter.IsFull() {
return PutResponse{
Success: false,
Abort: true,
Error: "engine size limit reached",
}
}
cid, err := c.resolver.Resolve(c.vu.Context(), bucket)
if err != nil {
stats.Report(c.vu, objPutFails, 1)
return PutResponse{Error: err.Error()}
}
prm := &layer.PutObjectParams{
BktInfo: &data.BucketInfo{
Name: bucket,
CID: cid,
Owner: *c.ownerID,
Created: time.Now(),
},
Header: map[string]string{},
Object: key,
Size: uint64(payload.Size()),
Reader: payload.Reader(),
}
start := time.Now()
if _, err := c.l.PutObject(c.vu.Context(), prm); err != nil {
stats.Report(c.vu, objPutFails, 1)
return PutResponse{Error: err.Error()}
}
stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start)))
stats.Report(c.vu, objPutSuccess, 1)
stats.ReportDataSent(c.vu, float64(prm.Size))
stats.Report(c.vu, objPutData, float64(prm.Size))
return PutResponse{Success: true}
}
func (c *Client) Get(bucket, key string) GetResponse {
cid, err := c.resolver.Resolve(c.vu.Context(), bucket)
if err != nil {
stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()}
}
start := time.Now()
bktInfo := &data.BucketInfo{
Name: bucket,
CID: cid,
Owner: *c.ownerID,
}
headPrm := &layer.HeadObjectParams{
BktInfo: bktInfo,
Object: key,
}
extInfo, err := c.l.GetExtendedObjectInfo(c.vu.Context(), headPrm)
if err != nil {
stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()}
}
wr := &recvDataReporter{}
getPrm := &layer.GetObjectParams{
BucketInfo: bktInfo,
ObjectInfo: extInfo.ObjectInfo,
Range: &layer.RangeParams{
Start: 0,
End: uint64(extInfo.ObjectInfo.Size),
},
}
objPayload, err := c.l.GetObject(c.vu.Context(), getPrm)
if err != nil {
stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()}
}
err = objPayload.StreamTo(wr)
if err != nil {
stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()}
}
stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start)))
stats.Report(c.vu, objGetSuccess, 1)
stats.ReportDataReceived(c.vu, wr.total)
stats.Report(c.vu, objGetData, wr.total)
return GetResponse{Success: true}
}
type recvDataReporter struct{ total float64 }
func (r *recvDataReporter) Write(p []byte) (int, error) {
r.total += float64(len(p))
return len(p), nil
}


@@ -1,93 +0,0 @@
package s3local
import (
"bytes"
"context"
"fmt"
"io"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
)
// frostfs implements the subset of layer.FrostFS needed for clients
// backed by local storage engines. Attempting to call any of the
// unimplemented methods panics.
type frostfs struct {
*rawclient.RawClient
}
func unimplementedMessage(fname string) string {
return fmt.Sprintf("layer.FrostFS.%s is unimplemented and should not be called. If you are seeing "+
"this error, it probably means you tried to use the s3local scenario for "+
"something other than filling a cluster (i.e. PUT or GET).", fname)
}
func (*frostfs) CreateContainer(context.Context, layer.PrmContainerCreate) (*layer.ContainerCreateResult, error) {
panic(unimplementedMessage("CreateContainer"))
}
func (*frostfs) Container(ctx context.Context, prmContainer layer.PrmContainer) (*container.Container, error) {
panic(unimplementedMessage("Container"))
}
func (*frostfs) UserContainers(ctx context.Context, containers layer.PrmUserContainers) ([]cid.ID, error) {
panic(unimplementedMessage("UserContainers"))
}
func (*frostfs) SetContainerEACL(context.Context, eacl.Table, *session.Container) error {
panic(unimplementedMessage("SetContainerEACL"))
}
func (*frostfs) ContainerEACL(ctx context.Context, containerEACL layer.PrmContainerEACL) (*eacl.Table, error) {
panic(unimplementedMessage("ContainerEACL"))
}
func (*frostfs) DeleteContainer(context.Context, cid.ID, *session.Container) error {
panic(unimplementedMessage("DeleteContainer"))
}
func (f *frostfs) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*layer.ObjectPart, error) {
obj, err := f.Get(ctx, prm.Container, prm.Object)
if err != nil {
return nil, err
}
part := &layer.ObjectPart{}
if prm.WithHeader {
part.Head = obj
}
if prm.WithPayload {
part.Payload = io.NopCloser(bytes.NewReader(obj.Payload()))
}
return part, nil
}
func (f *frostfs) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (oid.ID, error) {
payload, err := io.ReadAll(prm.Payload)
if err != nil {
return oid.ID{}, fmt.Errorf("reading payload: %v", err)
}
hdrs := map[string]string{}
for _, attr := range prm.Attributes {
hdrs[attr[0]] = attr[1]
}
return f.Put(ctx, prm.Container, nil, hdrs, payload)
}
func (f *frostfs) DeleteObject(context.Context, layer.PrmObjectDelete) error {
panic(unimplementedMessage("DeleteObject"))
}
func (f *frostfs) TimeToEpoch(ctx context.Context, now time.Time, future time.Time) (uint64, uint64, error) {
panic(unimplementedMessage("TimeToEpoch"))
}
func (f *frostfs) SearchObjects(ctx context.Context, search layer.PrmObjectSearch) ([]oid.ID, error) {
panic(unimplementedMessage("SearchObjects"))
}

View file

@@ -1,175 +0,0 @@
package s3local
import (
"context"
"flag"
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics"
"go.uber.org/zap"
)
// RootModule is the global module object type. It is instantiated once per test
// run and will be used to create k6/x/frostfs/s3local module instances for each VU.
type RootModule struct {
m *local.RootModule
}
// Local represents an instance of the module for every VU.
type Local struct {
l *local.Local
}
// Ensure the interfaces are implemented correctly.
var (
_ modules.Module = &RootModule{}
_ modules.Instance = &Local{}
internalObjPutSuccess, internalObjPutFails, internalObjPutDuration, internalObjPutData *metrics.Metric
internalObjGetSuccess, internalObjGetFails, internalObjGetDuration, internalObjGetData *metrics.Metric
objPutSuccess, objPutFails, objPutDuration, objPutData *metrics.Metric
objGetSuccess, objGetFails, objGetDuration, objGetData *metrics.Metric
)
func init() {
modules.Register("k6/x/frostfs/s3local", &RootModule{
m: &local.RootModule{},
})
}
// NewModuleInstance implements the modules.Module interface and returns
// a new instance for each VU.
func (r *RootModule) NewModuleInstance(vu modules.VU) modules.Instance {
return &Local{local.NewLocalModuleInstance(vu, r.m.GetOrCreateEngine)}
}
// Exports implements the modules.Instance interface and returns the exports
// of the JS module.
func (s *Local) Exports() modules.Exports {
return modules.Exports{Default: s}
}
func (s *Local) Connect(configFile string, configDir string, params map[string]string, bucketMapping map[string]string, maxSizeGB int64) (*Client, error) {
// Parse configuration flags.
fs := flag.NewFlagSet("s3local", flag.ContinueOnError)
hexKey := fs.String("hex_key", "", "Private key to use as a hexadecimal string. A random one is created if none is provided")
nodePosition := fs.Int("node_position", 0, "Position of this node in the node array if loading multiple nodes independently")
nodeCount := fs.Int("node_count", 1, "Number of nodes in the node array if loading multiple nodes independently")
debugLogger := fs.Bool("debug_logger", false, "Whether to use the development logger instead of the default one for debugging purposes")
{
args := make([]string, 0, len(params))
for k, v := range params {
args = append(args, fmt.Sprintf("-%s=%s", k, v))
}
if err := fs.Parse(args); err != nil {
return nil, fmt.Errorf("parsing parameters: %v", err)
}
}
// Validate and read configuration flags.
key, err := local.ParseOrCreateKey(*hexKey)
if err != nil {
return nil, fmt.Errorf("parsing hex_key: %v", err)
}
if *nodeCount <= 0 {
return nil, fmt.Errorf("node_count must be positive")
}
if *nodePosition < 0 || *nodePosition >= *nodeCount {
return nil, fmt.Errorf("node_position must be in the range [0, node_count-1]")
}
// Register metrics.
internalObjPutSuccess, _ = stats.Registry.NewMetric("s3local_internal_obj_put_success", metrics.Counter)
internalObjPutFails, _ = stats.Registry.NewMetric("s3local_internal_obj_put_fails", metrics.Counter)
internalObjPutDuration, _ = stats.Registry.NewMetric("s3local_internal_obj_put_duration", metrics.Trend, metrics.Time)
internalObjPutData, _ = stats.Registry.NewMetric("s3local_internal_obj_put_bytes", metrics.Counter, metrics.Data)
internalObjGetSuccess, _ = stats.Registry.NewMetric("s3local_internal_obj_get_success", metrics.Counter)
internalObjGetFails, _ = stats.Registry.NewMetric("s3local_internal_obj_get_fails", metrics.Counter)
internalObjGetDuration, _ = stats.Registry.NewMetric("s3local_internal_obj_get_duration", metrics.Trend, metrics.Time)
internalObjGetData, _ = stats.Registry.NewMetric("s3local_internal_obj_get_bytes", metrics.Counter, metrics.Data)
objPutSuccess, _ = stats.Registry.NewMetric("s3local_obj_put_success", metrics.Counter)
objPutFails, _ = stats.Registry.NewMetric("s3local_obj_put_fails", metrics.Counter)
objPutDuration, _ = stats.Registry.NewMetric("s3local_obj_put_duration", metrics.Trend, metrics.Time)
objPutData, _ = stats.Registry.NewMetric("s3local_obj_put_bytes", metrics.Counter, metrics.Data)
objGetSuccess, _ = stats.Registry.NewMetric("s3local_obj_get_success", metrics.Counter)
objGetFails, _ = stats.Registry.NewMetric("s3local_obj_get_fails", metrics.Counter)
objGetDuration, _ = stats.Registry.NewMetric("s3local_obj_get_duration", metrics.Trend, metrics.Time)
objGetData, _ = stats.Registry.NewMetric("s3local_obj_get_bytes", metrics.Counter, metrics.Data)
// Create S3 layer backed by local storage engine and tree service.
ng, limiter, err := s.l.ResolveEngine(s.l.VU().Context(), configFile, configDir, *debugLogger, maxSizeGB)
if err != nil {
return nil, fmt.Errorf("connecting to engine for config - file %q dir %q: %v", configFile, configDir, err)
}
treeSvc := tree.NewTree(treeServiceEngineWrapper{
ng: ng,
pos: *nodePosition,
size: *nodeCount,
}, zap.L())
rc := rawclient.New(ng,
rawclient.WithKey(key.PrivateKey),
rawclient.WithPutHandler(func(sz uint64, err error, dt time.Duration) {
if err != nil {
stats.Report(s.l.VU(), internalObjPutFails, 1)
} else {
stats.Report(s.l.VU(), internalObjPutSuccess, 1)
stats.Report(s.l.VU(), internalObjPutDuration, metrics.D(dt))
stats.Report(s.l.VU(), internalObjPutData, float64(sz))
}
}),
rawclient.WithGetHandler(func(sz uint64, err error, dt time.Duration) {
if err != nil {
stats.Report(s.l.VU(), internalObjGetFails, 1)
} else {
stats.Report(s.l.VU(), internalObjGetSuccess, 1)
stats.Report(s.l.VU(), internalObjGetDuration, metrics.D(dt))
stats.Report(s.l.VU(), internalObjGetData, float64(sz))
}
}),
)
resolver, err := newFixedBucketResolver(bucketMapping)
if err != nil {
return nil, fmt.Errorf("creating bucket resolver: %v", err)
}
cfg := &layer.Config{
Cache: layer.NewCache(layer.DefaultCachesConfigs(zap.L())),
AnonKey: layer.AnonymousKey{Key: key},
Resolver: resolver,
TreeService: treeSvc,
}
l := layer.NewLayer(zap.L(), &frostfs{rc}, cfg)
err = l.Initialize(s.l.VU().Context(), nopEventListener{})
if err != nil {
return nil, fmt.Errorf("initialize: %w", err)
}
return &Client{
vu: s.l.VU(),
l: l,
ownerID: rc.OwnerID(),
resolver: resolver,
limiter: limiter,
}, nil
}
type nopEventListener struct{}
func (nopEventListener) Subscribe(context.Context, string, layer.MsgHandler) error { return nil }
func (nopEventListener) Listen(context.Context) {}


@@ -1,32 +0,0 @@
package s3local
import (
"context"
"fmt"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// fixedBucketResolver is a static bucket resolver from the provided map.
// This is needed to replace the normal resolver for local storage engine clients, since
// those should not use DNS or NNS for resolution.
type fixedBucketResolver map[string]cid.ID
func newFixedBucketResolver(bucketMapping map[string]string) (fixedBucketResolver, error) {
r := fixedBucketResolver{}
for bucket, cidStr := range bucketMapping {
var id cid.ID
if err := id.DecodeString(cidStr); err != nil {
return nil, fmt.Errorf("decoding container id %q: %v", cidStr, err)
}
r[bucket] = id
}
return r, nil
}
func (r fixedBucketResolver) Resolve(_ context.Context, bucket string) (cid.ID, error) {
if cid, resolved := r[bucket]; resolved {
return cid, nil
}
return cid.ID{}, fmt.Errorf("bucket %s is not mapped to any container", bucket)
}
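
Because local-engine clients must not rely on DNS or NNS, the bucket-to-container mapping is passed straight into Connect and becomes this fixedBucketResolver; resolving a bucket missing from the map returns an error. A wiring sketch from the scenario side, assuming a connect binding matching the Go Connect signature, with placeholder env variables and container ID:

import s3local from 'k6/x/frostfs/s3local';

const bucketMapping = {
  // bucket name -> container ID; this must be a real container ID string,
  // otherwise Connect fails while decoding the mapping
  'test-bucket': '<container-id>',
};

const client = s3local.connect(
    __ENV.CONFIG_FILE, __ENV.CONFIG_DIR || '',
    { node_position: '0', node_count: '1' },
    bucketMapping,
    16 /* maxSizeGB */);
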


@@ -1,217 +0,0 @@
package s3local
import (
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
)
// treeServiceEngineWrapper implements the basic functioning of tree service using
// only the local storage engine instance. The node position and count is fixed
// beforehand in order to coordinate multiple runs on different nodes of the same
// cluster.
//
// The implementation mostly emulates the following
//
// - https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/services/tree/service.go
// - https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/src/branch/master/internal/frostfs/services/tree_client_grpc.go
//
// but skips details which are irrelevant for local storage engine-backed clients.
type treeServiceEngineWrapper struct {
ng *engine.StorageEngine
pos int
size int
}
type kv struct {
k string
v []byte
}
func (kv kv) GetKey() string { return kv.k }
func (kv kv) GetValue() []byte { return kv.v }
type nodeResponse struct {
meta []tree.Meta
nodeID uint64
parentID uint64
ts uint64
}
func (r nodeResponse) GetMeta() []tree.Meta { return r.meta }
func (r nodeResponse) GetNodeID() uint64 { return r.nodeID }
func (r nodeResponse) GetParentID() uint64 { return r.parentID }
func (r nodeResponse) GetTimestamp() uint64 { return r.ts }
func (s treeServiceEngineWrapper) GetNodes(ctx context.Context, p *tree.GetNodesParams) ([]tree.NodeResponse, error) {
nodeIDs, err := s.ng.TreeGetByPath(ctx, p.BktInfo.CID, p.TreeID, pilorama.AttributeFilename, p.Path, p.LatestOnly)
if err != nil {
if errors.Is(err, pilorama.ErrTreeNotFound) {
// This is needed in order for the tree implementation to create the tree/node
// if it doesn't exist already.
// See: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/src/branch/master/internal/frostfs/services/tree_client_grpc.go#L306
return nil, tree.ErrNodeNotFound
}
return nil, err
}
resps := make([]tree.NodeResponse, 0, len(nodeIDs))
for _, nodeID := range nodeIDs {
m, parentID, err := s.ng.TreeGetMeta(ctx, p.BktInfo.CID, p.TreeID, nodeID)
if err != nil {
return nil, err
}
resp := nodeResponse{
parentID: parentID,
nodeID: nodeID,
ts: m.Time,
}
if p.AllAttrs {
resp.meta = kvToTreeMeta(m.Items)
} else {
for _, it := range m.Items {
for _, attr := range p.Meta {
if it.Key == attr {
resp.meta = append(resp.meta, kv{it.Key, it.Value})
}
break
}
}
}
resps = append(resps, resp)
}
return resps, nil
}
func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) ([]tree.NodeResponse, error) {
var resps []tree.NodeResponse
var traverse func(nodeID uint64, curDepth uint32) error
traverse = func(nodeID uint64, curDepth uint32) error {
m, parentID, err := s.ng.TreeGetMeta(ctx, bktInfo.CID, treeID, nodeID)
if err != nil {
return fmt.Errorf("getting meta: %v", err)
}
resps = append(resps, nodeResponse{
nodeID: nodeID,
parentID: parentID,
ts: m.Time,
meta: kvToTreeMeta(m.Items),
})
if curDepth >= depth {
return nil
}
children, err := s.ng.TreeGetChildren(ctx, bktInfo.CID, treeID, nodeID)
if err != nil {
return fmt.Errorf("getting children: %v", err)
}
for _, child := range children {
if err := traverse(child.ID, curDepth+1); err != nil {
return err
}
}
return nil
}
if err := traverse(rootID, 0); err != nil {
return nil, fmt.Errorf("traversing: %v", err)
}
return resps, nil
}
func (s treeServiceEngineWrapper) AddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, parentID uint64, meta map[string]string) (uint64, error) {
desc := pilorama.CIDDescriptor{
CID: bktInfo.CID,
Position: s.pos,
Size: s.size,
}
mv, err := s.ng.TreeMove(ctx, desc, treeID, &pilorama.Move{
Parent: parentID,
Child: pilorama.RootID,
Meta: pilorama.Meta{Items: mapToKV(meta)},
})
return mv.Child, err
}
func (s treeServiceEngineWrapper) AddNodeByPath(ctx context.Context, bktInfo *data.BucketInfo, treeID string, path []string, meta map[string]string) (uint64, error) {
desc := pilorama.CIDDescriptor{
CID: bktInfo.CID,
Position: s.pos,
Size: s.size,
}
mvs, err := s.ng.TreeAddByPath(ctx, desc, treeID, pilorama.AttributeFilename, path, mapToKV(meta))
if err != nil {
return pilorama.TrashID, err
}
return mvs[len(mvs)-1].Child, nil
}
func (s treeServiceEngineWrapper) MoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID, parentID uint64, meta map[string]string) error {
if nodeID == pilorama.RootID {
return fmt.Errorf("node with ID %d is the root and can't be moved", nodeID)
}
desc := pilorama.CIDDescriptor{
CID: bktInfo.CID,
Position: s.pos,
Size: s.size,
}
_, err := s.ng.TreeMove(ctx, desc, treeID, &pilorama.Move{
Parent: parentID,
Child: nodeID,
Meta: pilorama.Meta{
Items: mapToKV(meta),
},
})
return err
}
func (s treeServiceEngineWrapper) RemoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID uint64) error {
if nodeID == pilorama.RootID {
return fmt.Errorf("node with ID %d is the root and can't be removed", nodeID)
}
desc := pilorama.CIDDescriptor{
CID: bktInfo.CID,
Position: s.pos,
Size: s.size,
}
_, err := s.ng.TreeMove(ctx, desc, treeID, &pilorama.Move{
Parent: pilorama.TrashID,
Child: nodeID,
})
return err
}
func (s treeServiceEngineWrapper) GetSubTreeStream(
ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32,
) (tree.SubTreeStream, error) {
panic(unimplementedMessage("TreeService.GetSubTreeStream"))
}
func mapToKV(m map[string]string) []pilorama.KeyValue {
var kvs []pilorama.KeyValue
for k, v := range m {
kvs = append(kvs, pilorama.KeyValue{
Key: k,
Value: []byte(v),
})
}
return kvs
}
func kvToTreeMeta(x []pilorama.KeyValue) []tree.Meta {
ret := make([]tree.Meta, 0, len(x))
for _, x := range x {
ret = append(ret, kv{x.Key, x.Value})
}
return ret
}


@@ -1,70 +1,26 @@
package stats package stats
import ( import (
"strings"
"time" "time"
"go.k6.io/k6/js/modules" "go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics" "go.k6.io/k6/metrics"
) )
// RootModule is the global module object type. It is instantiated once per test
// run and will be used to create k6/x/frostfs/stats module instances for each VU.
type RootModule struct {
Instance string
}
var (
tagSet *metrics.TagSet
Registry *metrics.Registry
)
func init() {
Registry = metrics.NewRegistry()
tagSet = Registry.RootTagSet()
modules.Register("k6/x/frostfs/stats", &RootModule{})
}
// SetTags sets additional tags to custom metrics.
// Format: "key1:value1;key2:value2".
// Panics if input has invalid format.
func (m *RootModule) SetTags(labels string) {
kv := make(map[string]string)
pairs := strings.Split(labels, ";")
for _, pair := range pairs {
items := strings.Split(pair, ":")
if len(items) != 2 {
panic("invalid labels format")
}
kv[strings.TrimSpace(items[0])] = strings.TrimSpace(items[1])
}
for k, v := range kv {
tagSet = tagSet.With(k, v)
}
}
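
SetTags attaches extra key:value tags to every custom metric sample reported through this package; the expected format is "key1:value1;key2:value2" and a malformed pair panics. The grpc scenario below feeds it from the METRIC_TAGS environment variable, which boils down to:

import stats from 'k6/x/frostfs/stats';

if (__ENV.METRIC_TAGS) {
  // e.g. METRIC_TAGS="cluster:dev;run:42"
  stats.setTags(__ENV.METRIC_TAGS);
}
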
func Report(vu modules.VU, metric *metrics.Metric, value float64) { func Report(vu modules.VU, metric *metrics.Metric, value float64) {
metrics.PushIfNotDone(vu.Context(), vu.State().Samples, metrics.Sample{ metrics.PushIfNotDone(vu.Context(), vu.State().Samples, metrics.Sample{
TimeSeries: metrics.TimeSeries{ Metric: metric,
Metric: metric, Time: time.Now(),
Tags: tagSet, Value: value,
},
Time: time.Now(),
Value: value,
}) })
} }
func ReportDataReceived(vu modules.VU, value float64) { func ReportDataReceived(vu modules.VU, value float64) {
vu.State().BuiltinMetrics.DataReceived.Sink.Add( vu.State().BuiltinMetrics.DataReceived.Sink.Add(
metrics.Sample{ metrics.Sample{
TimeSeries: metrics.TimeSeries{ Metric: &metrics.Metric{},
Metric: &metrics.Metric{}, Value: value,
Tags: tagSet, Time: time.Now()},
},
Value: value,
Time: time.Now(),
},
) )
} }
@@ -72,12 +28,8 @@ func ReportDataSent(vu modules.VU, value float64) {
state := vu.State() state := vu.State()
state.BuiltinMetrics.DataSent.Sink.Add( state.BuiltinMetrics.DataSent.Sink.Add(
metrics.Sample{ metrics.Sample{
TimeSeries: metrics.TimeSeries{ Metric: &metrics.Metric{},
Metric: &metrics.Metric{}, Value: value,
Tags: tagSet, Time: time.Now()},
},
Value: value,
Time: time.Now(),
},
) )
} }


@@ -1,6 +0,0 @@
package version
var (
// Version is the xk6 command-line utils version.
Version = "dev"
)


@@ -1,24 +0,0 @@
# This configuration can be used for the local scenario when testing locally.
storage:
shard_num: 1
shard:
0:
metabase:
path: /tmp/k6_local/metabase
perm: 0600
blobstor:
- path: /tmp/k6_local/blobovnicza
type: blobovnicza
perm: 0600
opened_cache_capacity: 32
depth: 1
width: 1
- path: /tmp/k6_local/fstree
type: fstree
perm: 0600
depth: 4
writecache:
enabled: false
gc:
remover_batch_size: 100
remover_sleep_interval: 1m


@@ -1,222 +1,176 @@
import { sleep } from 'k6'; import datagen from 'k6/x/frostfs/datagen';
import { SharedArray } from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import native from 'k6/x/frostfs/native'; import native from 'k6/x/frostfs/native';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry'; import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats'; import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { newGenerator } from './libs/datagen.js';
import { parseEnv } from './libs/env-parser.js';
import { textSummary } from './libs/k6-summary-0.0.2.js'; import { textSummary } from './libs/k6-summary-0.0.2.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv(); const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const obj_list = new SharedArray( const container_list = new SharedArray('container_list', function () {
'obj_list', return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
function () { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; }); });
const container_list = new SharedArray(
'container_list',
function () { return JSON.parse(open(__ENV.PREGEN_JSON)).containers; });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json'; const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random gRPC endpoint for current VU // Select random gRPC endpoint for current VU
const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(','); const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpc_endpoint = const grpc_endpoint = grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)]; const grpc_client = native.connect(grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5, __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 15);
const grpc_client = native.connect( const log = logging.new().withField("endpoint", grpc_endpoint);
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
const log = logging.new().withField('endpoint', grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE; const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION; const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) { const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
stats.setTags(__ENV.METRIC_TAGS) let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
} }
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined; const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE), __ENV.PAYLOAD_TYPE || "");
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
}
const scenarios = {}; const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0'); const write_vu_count = parseInt(__ENV.WRITERS || '0');
const write_grpc_chunk_size = 1024 * parseInt(__ENV.GRPC_CHUNK_SIZE || '0')
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) { if (write_vu_count > 0) {
scenarios.write = { scenarios.write = {
executor: 'constant-vus', executor: 'constant-vus',
vus: write_vu_count, vus: write_vu_count,
duration: `${duration}s`, duration: `${duration}s`,
exec: 'obj_write', exec: 'obj_write',
gracefulStop: '5s', gracefulStop: '5s',
}; };
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_exit_on_null = write_vu_count == 0;
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
obj_to_delete_selector =
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
} }
const read_vu_count = parseInt(__ENV.READERS || '0'); const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) { if (read_vu_count > 0) {
scenarios.read = { scenarios.read = {
executor: 'constant-vus', executor: 'constant-vus',
vus: read_vu_count, vus: read_vu_count,
duration: `${duration}s`, duration: `${duration}s`,
exec: 'obj_read', exec: 'obj_read',
gracefulStop: '5s', gracefulStop: '5s',
}; };
} }
const delete_vu_count = parseInt(__ENV.DELETERS || '0'); const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) { if (delete_vu_count > 0) {
if (!obj_to_delete_selector) { if (!obj_to_delete_selector) {
throw new Error( throw new Error('Positive DELETE worker number without a proper object selector');
'Positive DELETE worker number without a proper object selector'); }
}
scenarios.delete = { scenarios.delete = {
executor: 'constant-vus', executor: 'constant-vus',
vus: delete_vu_count, vus: delete_vu_count,
duration: `${duration}s`, duration: `${duration}s`,
exec: 'obj_delete', exec: 'obj_delete',
gracefulStop: '5s', gracefulStop: '5s',
}; };
} }
export const options = { export const options = {
scenarios, scenarios,
setupTimeout: '5s', setupTimeout: '5s',
}; };
export function setup() { export function setup() {
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count; const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
console.log(`Pregenerated containers: ${container_list.length}`); console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`); console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`); console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`); console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`); console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`); console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`); console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
} }
export function teardown(data) { export function teardown(data) {
if (obj_registry) { if (obj_registry) {
obj_registry.close(); obj_registry.close();
} }
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
} }
export function handleSummary(data) { export function handleSummary(data) {
return { return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }), 'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data), [summary_json]: JSON.stringify(data),
}; };
} }
export function obj_write() { export function obj_write() {
if (__ENV.SLEEP_WRITE) { if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE); sleep(__ENV.SLEEP_WRITE);
} }
const headers = { unique_header: uuidv4() }; const headers = {
const container = unique_header: uuidv4()
container_list[Math.floor(Math.random() * container_list.length)]; };
const container = container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload(); const { payload, hash } = generator.genPayload(registry_enabled);
const resp = const resp = grpc_client.put(container, headers, payload);
grpc_client.put(container, headers, payload, write_grpc_chunk_size); if (!resp.success) {
if (!resp.success) { log.withField("cid", container).error(resp.error);
log.withField('cid', container).error(resp.error); return;
return; }
}
if (obj_registry) { if (obj_registry) {
obj_registry.addObject(container, resp.object_id, '', '', payload.hash()); obj_registry.addObject(container, resp.object_id, "", "", hash);
} }
} }
export function obj_read() { export function obj_read() {
if (__ENV.SLEEP_READ) { if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ); sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
} }
const resp = grpc_client.get(obj.c_id, obj.o_id)
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) { if (!resp.success) {
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error); log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
} }
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({ cid: obj.container, oid: obj.object }).error(resp.error);
}
} }
export function obj_delete() { export function obj_delete() {
if (__ENV.SLEEP_DELETE) { if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE); sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
if (obj_to_delete_exit_on_null) {
exec.test.abort("No more objects to select");
} }
return;
}
const resp = grpc_client.delete(obj.c_id, obj.o_id); const obj = obj_to_delete_selector.nextObject();
if (!resp.success) { if (!obj) {
// Log errors except (2052 - object already deleted) return;
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error); }
return;
}
obj_registry.deleteObject(obj.id); const resp = grpc_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}
export function uuidv4() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
let r = Math.random() * 16 | 0, v = c === 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
} }


@@ -1,238 +0,0 @@
import { sleep } from 'k6';
import { SharedArray } from 'k6/data';
import logging from 'k6/x/frostfs/logging';
import native from 'k6/x/frostfs/native';
import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats';
import { newGenerator } from './libs/datagen.js';
import { parseEnv } from './libs/env-parser.js';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
// Select random gRPC endpoint for current VU
const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpc_endpoint =
grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
const grpc_client = native.connect(
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
const log = logging.new().withField('endpoint', grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
}
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
}
const scenarios = {};
const time_unit = __ENV.TIME_UNIT || '1s';
const pre_alloc_write_vus = parseInt(__ENV.PRE_ALLOC_WRITERS || '0');
const max_write_vus = parseInt(__ENV.MAX_WRITERS || pre_alloc_write_vus);
const write_rate = parseInt(__ENV.WRITE_RATE || '0');
const write_grpc_chunk_size = 1024 * parseInt(__ENV.GRPC_CHUNK_SIZE || '0')
const generator = newGenerator(write_rate > 0);
if (write_rate > 0) {
scenarios.write = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_write_vus,
rate: write_rate,
timeUnit: time_unit,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const pre_alloc_read_vus = parseInt(__ENV.PRE_ALLOC_READERS || '0');
const max_read_vus = parseInt(__ENV.MAX_READERS || pre_alloc_read_vus);
const read_rate = parseInt(__ENV.READ_RATE || '0');
if (read_rate > 0) {
scenarios.read = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_read_vus,
rate: read_rate,
timeUnit: time_unit,
exec: 'obj_read',
gracefulStop: '5s',
};
}
const pre_alloc_delete_vus = parseInt(__ENV.PRE_ALLOC_DELETERS || '0');
const max_delete_vus = parseInt(__ENV.MAX_DELETERS || pre_alloc_write_vus);
const delete_rate = parseInt(__ENV.DELETE_RATE || '0');
if (delete_rate > 0) {
if (!obj_to_delete_selector) {
throw new Error(
'Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_delete_vus,
maxVUs: max_delete_vus,
rate: delete_rate,
timeUnit: time_unit,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_pre_allocated_vu_count =
pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Pre allocated reading VUs: ${pre_alloc_read_vus}`);
console.log(`Pre allocated writing VUs: ${pre_alloc_write_vus}`);
console.log(`Pre allocated deleting VUs: ${pre_alloc_delete_vus}`);
console.log(`Total pre allocated VUs: ${total_pre_allocated_vu_count}`);
console.log(`Max reading VUs: ${max_read_vus}`);
console.log(`Max writing VUs: ${max_write_vus}`);
console.log(`Max deleting VUs: ${max_delete_vus}`);
console.log(`Total max VUs: ${total_max_vu_count}`);
console.log(`Time unit: ${time_unit}`);
console.log(`Read rate: ${read_rate}`);
console.log(`Writing rate: ${write_rate}`);
console.log(`Delete rate: ${delete_rate}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const headers = { unique_header: uuidv4() };
const container =
container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload();
const resp =
grpc_client.put(container, headers, payload, write_grpc_chunk_size);
if (!resp.success) {
log.withField('cid', container).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject(container, resp.object_id, '', '', payload.hash());
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = grpc_client.get(obj.c_id, obj.o_id)
if (!resp.success) {
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({ cid: obj.container, oid: obj.object }).error(resp.error);
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const resp = grpc_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}


@@ -1,143 +1,125 @@
import {sleep} from 'k6'; import datagen from 'k6/x/frostfs/datagen';
import {SharedArray} from 'k6/data';
import http from 'k6/http';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry'; import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats'; import http from 'k6/http';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import {newGenerator} from './libs/datagen.js'; const obj_list = new SharedArray('obj_list', function () {
import {parseEnv} from './libs/env-parser.js'; return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
}); });
const container_list = new SharedArray('container_list', function() { const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers; return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
}); });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json'; const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random HTTP endpoint for current VU // Select random HTTP endpoint for current VU
const http_endpoints = __ENV.HTTP_ENDPOINTS.split(','); const http_endpoints = __ENV.HTTP_ENDPOINTS.split(',');
const http_endpoint = const http_endpoint = http_endpoints[Math.floor(Math.random() * http_endpoints.length)];
http_endpoints[Math.floor(Math.random() * http_endpoints.length)]; const log = logging.new().withField("endpoint", http_endpoint);
const log = logging.new().withField('endpoint', http_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE; const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION; const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) { const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE), __ENV.PAYLOAD_TYPE || "");
stats.setTags(__ENV.METRIC_TAGS)
}
const scenarios = {}; const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0'); const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) { if (write_vu_count > 0) {
scenarios.write = { scenarios.write = {
executor: 'constant-vus', executor: 'constant-vus',
vus: write_vu_count, vus: write_vu_count,
duration: `${duration}s`, duration: `${duration}s`,
exec: 'obj_write', exec: 'obj_write',
gracefulStop: '5s', gracefulStop: '5s',
} }
} }
const read_vu_count = parseInt(__ENV.READERS || '0'); const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) { if (read_vu_count > 0) {
scenarios.read = { scenarios.read = {
executor: 'constant-vus', executor: 'constant-vus',
vus: read_vu_count, vus: read_vu_count,
duration: `${duration}s`, duration: `${duration}s`,
exec: 'obj_read', exec: 'obj_read',
gracefulStop: '5s', gracefulStop: '5s',
} }
} }
export const options = { export const options = {
scenarios, scenarios,
setupTimeout: '5s', setupTimeout: '5s',
}; };
export function setup() { export function setup() {
const total_vu_count = write_vu_count + read_vu_count; const total_vu_count = write_vu_count + read_vu_count;
console.log(`Pregenerated containers: ${container_list.length}`); console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`); console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`); console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`); console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`); console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`); console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
} }
export function teardown(data) { export function teardown(data) {
if (obj_registry) { if (obj_registry) {
obj_registry.close(); obj_registry.close();
} }
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
} }
export function handleSummary(data) { export function handleSummary(data) {
return { return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}), 'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data), [summary_json]: JSON.stringify(data),
}; };
} }
export function obj_write() { export function obj_write() {
if (__ENV.SLEEP_WRITE) { if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE); sleep(__ENV.SLEEP_WRITE);
} }
const container = const container = container_list[Math.floor(Math.random() * container_list.length)];
container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload(); const { payload, hash } = generator.genPayload(registry_enabled);
const data = { const data = {
field: uuidv4(), field: uuidv4(),
// Because we use `file` wrapping and it is not straightforward to use file: http.file(payload, "random.data"),
// streams here, };
// `-e STREAMING=1` has no effect for this scenario.
file: http.file(payload.bytes(), 'random.data'),
};
const resp = http.post(`http://${http_endpoint}/upload/${container}`, data); const resp = http.post(`http://${http_endpoint}/upload/${container}`, data);
if (resp.status != 200) { if (resp.status != 200) {
log.withFields({status: resp.status, cid: container}).error(resp.error); log.withFields({status: resp.status, cid: container}).error(resp.error);
return; return;
} }
const object_id = JSON.parse(resp.body).object_id; const object_id = JSON.parse(resp.body).object_id;
if (obj_registry) { if (obj_registry) {
obj_registry.addObject(container, object_id, '', '', payload.hash()); obj_registry.addObject(container, object_id, "", "", hash);
} }
} }
export function obj_read() { export function obj_read() {
if (__ENV.SLEEP_READ) { if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ); sleep(__ENV.SLEEP_READ);
} }
const obj = obj_list[Math.floor(Math.random() * obj_list.length)]; const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = const resp = http.get(`http://${http_endpoint}/get/${obj.container}/${obj.object}`);
http.get(`http://${http_endpoint}/get/${obj.container}/${obj.object}`); if (resp.status != 200) {
if (resp.status != 200) { log.withFields({status: resp.status, cid: obj.container, oid: obj.object}).error(resp.error);
log.withFields({status: resp.status, cid: obj.container, oid: obj.object}) }
.error(resp.error); }
}
export function uuidv4() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
let r = Math.random() * 16 | 0, v = c === 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
} }

View file

@ -1,8 +0,0 @@
import datagen from 'k6/x/frostfs/datagen';
export function newGenerator(condition) {
if (condition) {
return datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE), __ENV.PAYLOAD_TYPE || "", !!__ENV.STREAMING);
}
return undefined;
}
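
A minimal sketch of how the `newGenerator`-based scenarios above consume this helper (names mirror the scenario code in this diff; values are illustrative and the xk6-frostfs extension must be built into k6 for it to run):

```js
import {newGenerator} from './libs/datagen.js';

const write_vu_count = parseInt(__ENV.WRITERS || '0');
// The generator is only constructed when there are writers; otherwise it stays undefined.
const generator = newGenerator(write_vu_count > 0);

export function obj_write() {
  // genPayload() returns an object exposing bytes() and hash(),
  // which the scenarios pass to the client's put/upload call and to the registry.
  const payload = generator.genPayload();
  // ... e.g. client.put(container, headers, payload) or http.file(payload.bytes(), 'random.data')
}
```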

View file

@ -1,10 +0,0 @@
import env from 'k6/x/frostfs/env';
export function parseEnv() {
if (__ENV.ENV_FILE) {
const parsedVars = env.parse(__ENV.ENV_FILE)
for (const prop in parsedVars) {
__ENV[prop] = __ENV[prop] || parsedVars[prop];
}
}
}
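
A short note on the intended call order, inferred from the code above rather than separate documentation: `parseEnv()` must run before any `__ENV` variable is read, and values already present in the environment take precedence over values from the file.

```js
import {parseEnv} from './libs/env-parser.js';

// Runs at module scope, before __ENV.* is consumed below.
parseEnv();

// If .env contains DURATION=60 but the run was started with -e DURATION=120,
// the command-line value wins, because parseEnv only fills in missing keys.
const duration = __ENV.DURATION;
```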

View file

@ -1,2 +0,0 @@
(()=>{"use strict";var t={n:r=>{var e=r&&r.__esModule?()=>r.default:()=>r;return t.d(e,{a:e}),e},d:(r,e)=>{for(var n in e)t.o(e,n)&&!t.o(r,n)&&Object.defineProperty(r,n,{enumerable:!0,get:e[n]})},o:(t,r)=>Object.prototype.hasOwnProperty.call(t,r),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},r={};t.r(r),t.d(r,{findBetween:()=>x,getCurrentStageIndex:()=>i,normalDistributionStages:()=>m,parseDuration:()=>o,randomIntBetween:()=>d,randomItem:()=>h,randomString:()=>p,tagWithCurrentStageIndex:()=>u,tagWithCurrentStageProfile:()=>s,uuidv4:()=>g});const e=require("k6/execution");var n=t.n(e);function o(t){if(null==t||t.length<1)throw new Error("str is empty");for(var r=0,e="",n={},o=0;o<t.length;o++)if((a(t[o])||"."==t[o])&&(e+=t[o]),null!=t[o+1]&&!a(t[o+1])&&"."!=t[o+1]){var i=parseFloat(e,10),u=t[o+1];switch(u){case"d":r+=24*i*60*60*1e3;break;case"h":r+=60*i*60*1e3;break;case"m":o+2<t.length&&"s"==t[o+2]?(r+=Math.trunc(i),o++,u="ms"):r+=60*i*1e3;break;case"s":r+=1e3*i;break;default:throw new Error("".concat(u," is an unsupported time unit"))}if(n[u])throw new Error("".concat(u," time unit is provided multiple times"));n[u]=!0,o++,e=""}return e.length>0&&(r+=parseFloat(e,10)),r}function a(t){return t>="0"&&t<="9"}function i(){if(null==n()||null==n().test||null==n().test.options)throw new Error("k6/execution.test.options is undefined - getCurrentStageIndex requires a k6 v0.38.0 or later. Please, upgrade for getting k6/execution.test.options supported.");var t=n().test.options.scenarios[n().scenario.name];if(null==t)throw new Error("the exec.test.options object doesn't contain the current scenario ".concat(n().scenario.name));if(null==t.stages)throw new Error("only ramping-vus or ramping-arravial-rate supports stages, it is not possible to get a stage index on other executors.");if(t.stages.length<1)throw new Error("the current scenario ".concat(t.name," doesn't contain any stage"));for(var r=0,e=new Date-n().scenario.startTime,a=0;a<t.stages.length;a++)if(e<(r+=o(t.stages[a].duration)))return a;return t.stages.length-1}function u(){n().vu.tags.stage=i()}function s(){n().vu.tags.stage_profile=function(){var t=i();if(t<1)return"ramp-up";var r=n().test.options.scenarios[n().scenario.name].stages,e=r[t],o=r[t-1];return e.target>o.target?"ramp-up":o.target==e.target?"steady":"ramp-down"}()}const l=require("k6/crypto");function c(t){return function(t){if(Array.isArray(t))return f(t)}(t)||function(t){if("undefined"!=typeof Symbol&&null!=t[Symbol.iterator]||null!=t["@@iterator"])return Array.from(t)}(t)||function(t,r){if(!t)return;if("string"==typeof t)return f(t,r);var e=Object.prototype.toString.call(t).slice(8,-1);"Object"===e&&t.constructor&&(e=t.constructor.name);if("Map"===e||"Set"===e)return Array.from(t);if("Arguments"===e||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(e))return f(t,r)}(t)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function f(t,r){(null==r||r>t.length)&&(r=t.length);for(var e=0,n=new Array(r);e<r;e++)n[e]=t[e];return n}function g(){var t=arguments.length>0&&void 0!==arguments[0]&&arguments[0];return t?y():v()}function d(t,r){return Math.floor(Math.random()*(r-t+1)+t)}function h(t){return t[Math.floor(Math.random()*t.length)]}function p(t){for(var r=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:"abcdefghijklmnopqrstuvwxyz",e="";t--;)e+=r[Math.random()*r.length|0];return e}function x(t,r,e){for(var n,o=arguments.length>3&&void 0!==arguments[3]&&arguments[3],a=[],i=!0,u=0;i&&-1!=(n=t.indexOf(r))&&(n+=r.length,-1!=(u=t.indexOf(e,n)));){var s=t.substring(n,u);if(!o)return s;a.push(s),t=t.substring(u+e.length)}return a.length?a:null}function m(t,r){var e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:10;function n(t,r,e){return Math.exp(-.5*Math.pow((e-t)/r,2))/(r*Math.sqrt(2*Math.PI))}for(var o=0,a=1,i=new Array(e+2).fill(0),u=new Array(e+2).fill(Math.ceil(r/6)),s=[],l=0;l<=e;l++)i[l]=n(o,a,-2*a+4*a*l/e);for(var f=Math.max.apply(Math,c(i)),g=i.map((function(r){return Math.round(r*t/f)})),d=1;d<=e;d++)u[d]=Math.ceil(4*r/(6*e));for(var h=0;h<=e+1;h++)s.push({duration:"".concat(u[h],"s"),target:g[h]});return s}function v(){return"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g,(function(t){var r=16*Math.random()|0;return("x"===t?r:3&r|8).toString(16)}))}function y(){for(var t=[],r=0;r<256;++r)t.push((r+256).toString(16).slice(1));var e=new Uint8Array((0,l.randomBytes)(16));return e[6]=15&e[6]|64,e[8]=63&e[8]|128,(t[e[0]]+t[e[1]]+t[e[2]]+t[e[3]]+"-"+t[e[4]]+t[e[5]]+"-"+t[e[6]]+t[e[7]]+"-"+t[e[8]]+t[e[9]]+"-"+t[e[10]]+t[e[11]]+t[e[12]]+t[e[13]]+t[e[14]]+t[e[15]]).toLowerCase()}var w=exports;for(var b in r)w[b]=r[b];r.__esModule&&Object.defineProperty(w,"__esModule",{value:!0})})();
//# sourceMappingURL=index.js.map

View file

@ -1,34 +0,0 @@
import { uuidv4 } from './k6-utils-1.4.0.js';
export function generateS3Key() {
let width = parseInt(__ENV.DIR_WIDTH || '0');
let height = parseInt(__ENV.DIR_HEIGHT || '0');
let key = ''
if (width > 0 && height > 0) {
for (let index = 0; index < height; index++) {
const w = Math.floor(Math.random() * width) + 1;
key = key + 'dir' + w + '/';
}
}
key += objName();
return key;
}
const asciiLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
function objName() {
if (__ENV.OBJ_NAME) {
return __ENV.OBJ_NAME;
}
const length = parseInt(__ENV.OBJ_NAME_LENGTH || '0');
if (length > 0) {
let name = "";
for (let i = 0; i < length; i++) {
name += asciiLetters.charAt(Math.floor(Math.random() * asciiLetters.length));
}
return name;
}
return uuidv4();
}
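
An illustrative example of the keys this helper produces (the directory numbers and the random name differ on every call):

```js
import {generateS3Key} from './libs/keygen.js';

// With -e DIR_HEIGHT=2 -e DIR_WIDTH=100 -e OBJ_NAME_LENGTH=8 a key looks like
// "dir42/dir7/k3qzt1ab"; with no DIR_* variables set it is just a UUID,
// and OBJ_NAME (if set) replaces the generated name entirely.
const key = generateS3Key();
```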

View file

@ -1,177 +0,0 @@
import {SharedArray} from 'k6/data';
import exec from 'k6/execution';
import local from 'k6/x/frostfs/local';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray('container_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const config_file = __ENV.CONFIG_FILE;
const config_dir = __ENV.CONFIG_DIR;
const debug_logger = (__ENV.DEBUG_LOGGER || 'false') == 'true';
const max_total_size_gb =
__ENV.MAX_TOTAL_SIZE_GB ? parseInt(__ENV.MAX_TOTAL_SIZE_GB) : 0;
const local_client =
local.connect(config_file, config_dir, '', debug_logger, max_total_size_gb);
const log = logging.new().withFields(
{'config_file': config_file, 'config_dir': config_dir});
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
}
const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
};
}
const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
if (!obj_to_delete_selector) {
throw new Error(
'Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
executor: 'constant-vus',
vus: delete_vu_count,
duration: `${duration}s`,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
const headers = {unique_header: uuidv4()};
const container =
container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload();
const resp = local_client.put(container, headers, payload);
if (!resp.success) {
if (resp.abort) {
exec.test.abort(resp.error);
}
log.withField('cid', container).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject(container, resp.object_id, '', '', payload.hash());
}
}
export function obj_read() {
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = local_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
}
}
export function obj_delete() {
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const resp = local_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}

View file

@ -1,54 +1,50 @@
import uuid import uuid
from helpers.cmd import execute_cmd, log from helpers.cmd import execute_cmd
def create_bucket(endpoint, versioning, location, acl, no_verify_ssl): def create_bucket(endpoint, versioning, location):
configuration = "" bucket_create_marker = False
if location: if location:
configuration = f"--create-bucket-configuration 'LocationConstraint={location}'" location = f"--create-bucket-configuration 'LocationConstraint={location}'"
if acl:
acl = f"--acl {acl}"
bucket_name = str(uuid.uuid4()) bucket_name = str(uuid.uuid4())
no_verify_ssl_str = "--no-verify-ssl" if no_verify_ssl else ""
cmd_line = f"aws {no_verify_ssl_str} s3api create-bucket --bucket {bucket_name} " \
f"--endpoint {endpoint} {configuration} {acl} "
cmd_line_ver = f"aws {no_verify_ssl_str} s3api put-bucket-versioning --bucket {bucket_name} " \
f"--versioning-configuration Status=Enabled --endpoint {endpoint}"
output, success = execute_cmd(cmd_line) cmd_line = f"aws --no-verify-ssl s3api create-bucket --bucket {bucket_name} " \
f"--endpoint http://{endpoint} {location}"
cmd_line_ver = f"aws --no-verify-ssl s3api put-bucket-versioning --bucket {bucket_name} " \
f"--versioning-configuration Status=Enabled --endpoint http://{endpoint} "
if not success and "succeeded and you already own it" not in output: out, success = execute_cmd(cmd_line)
log(f"{cmd_line}\n"
f"Bucket {bucket_name} has not been created:\n"
f"Error: {output}", endpoint)
return False
if versioning: if not success:
output, success = execute_cmd(cmd_line_ver) if "succeeded and you already own it" in out:
if not success: bucket_create_marker = True
log(f"{cmd_line_ver}\n"
f"Bucket versioning has not been applied for bucket {bucket_name}\n"
f"Error: {output}", endpoint)
else: else:
log(f"Bucket versioning has been applied for bucket {bucket_name}", endpoint) print(f" > Bucket {bucket_name} has not been created.")
else:
bucket_create_marker = True
print(f"cmd: {cmd_line}")
if bucket_create_marker and versioning == "True":
out, success = execute_cmd(cmd_line_ver)
if not success:
print(f" > Bucket versioning has not been applied for bucket {bucket_name}.")
else:
print(f" > Bucket versioning has been applied.")
log(f"Created bucket: {bucket_name} ({location})", endpoint)
return bucket_name return bucket_name
def upload_object(bucket, payload_filepath, endpoint, no_verify_ssl): def upload_object(bucket, payload_filepath, endpoint):
object_name = str(uuid.uuid4()) object_name = str(uuid.uuid4())
no_verify_ssl_str = "--no-verify-ssl" if no_verify_ssl else ""
cmd_line = f"aws {no_verify_ssl_str} s3api put-object --bucket {bucket} --key {object_name} " \ cmd_line = f"aws s3api put-object --bucket {bucket} --key {object_name} " \
f"--body {payload_filepath} --endpoint {endpoint}" f"--body {payload_filepath} --endpoint http://{endpoint}"
output, success = execute_cmd(cmd_line) out, success = execute_cmd(cmd_line)
if not success: if not success:
log(f"{cmd_line}\n" print(f" > Object {object_name} has not been uploaded.")
f"Object {object_name} has not been uploaded\n"
f"Error: {output}", endpoint)
return False return False
else:
return bucket, endpoint, object_name return object_name

View file

@ -1,12 +1,9 @@
import os import os
import shlex import shlex
import sys import sys
from datetime import datetime
from subprocess import check_output, CalledProcessError, STDOUT from subprocess import check_output, CalledProcessError, STDOUT
def log(message, endpoint):
time = datetime.utcnow()
print(f"{time} at {endpoint}: {message}")
def execute_cmd(cmd_line): def execute_cmd(cmd_line):
cmd_args = shlex.split(cmd_line) cmd_args = shlex.split(cmd_line)
@ -22,9 +19,10 @@ def execute_cmd(cmd_line):
return output, success return output, success
def random_payload(file, size): def random_payload(payload_filepath, size):
file.write(os.urandom(1024 * int(size))) with open('%s' % payload_filepath, 'w+b') as fout:
file.flush() fout.write(os.urandom(1024 * int(size)))
class ProgressBar: class ProgressBar:
@staticmethod @staticmethod

View file

@ -1,163 +1,81 @@
import re import re
from helpers.cmd import execute_cmd, log
def create_container(endpoint, policy, container_creation_retry, wallet_path, config, acl, local=False, retry=0): from helpers.cmd import execute_cmd
if retry > int(container_creation_retry):
raise ValueError(f"unable to create container: too many unsuccessful attempts")
if wallet_path:
wallet_file = f"--wallet {wallet_path}" def create_container(endpoint, policy, wallet_file, wallet_config):
if config: cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} container create --wallet {wallet_file} --config {wallet_config} " \
wallet_config = f"--config {config}" f" --policy '{policy}' --basic-acl public-read-write --await"
if acl:
acl_param = f"--basic-acl {acl}"
cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} container create {wallet_file} {wallet_config} " \
f" --policy '{policy}' {acl_param} --await"
output, success = execute_cmd(cmd_line) output, success = execute_cmd(cmd_line)
if not success: if not success:
log(f"{cmd_line}\n" print(f" > Container has not been created:\n{output}")
f"Container has not been created\n"
f"{output}", endpoint)
return False return False
else:
try:
fst_str = output.split('\n')[0]
except Exception:
log(f"{cmd_line}\n"
f"Incorrect output\n"
f"Output: {output or '<empty>'}", endpoint)
return False
splitted = fst_str.split(": ")
if len(splitted) != 2:
raise ValueError(f"no CID was parsed from command output:\t{fst_str}")
cid = splitted[1]
log(f"Created container: {cid} ({policy})", endpoint)
if not local:
return cid
cmd_line = f"frostfs-cli netmap nodeinfo --rpc-endpoint {endpoint} {wallet_file} {wallet_config}"
output, success = execute_cmd(cmd_line)
if not success:
log(f"{cmd_line}\n"
f"Failed to get nodeinfo\n"
f"{output}", endpoint)
return False
try:
fst_str = output.split('\n')[0]
except Exception:
log(f"{cmd_line}\n"
f"Incorrect output\n"
f"Output: {output or '<empty>'}", endpoint)
return False
splitted = fst_str.split(": ")
if len(splitted) != 2 or len(splitted[1]) == 0:
raise ValueError(f"no node key was parsed from command output:\t{fst_str}")
node_key = splitted[1]
cmd_line = f"frostfs-cli container nodes --rpc-endpoint {endpoint} {wallet_file} {wallet_config} --cid {cid}"
output, success = execute_cmd(cmd_line)
if not success:
log(f"{cmd_line}\n"
f"Failed to get container nodes\n"
f"{output}", endpoint)
return False
for output_str in output.split('\n'):
output_str = output_str.lstrip().rstrip()
if not output_str.startswith("Node "):
continue
splitted = output_str.split(": ")
if len(splitted) != 2 or len(splitted[1]) == 0:
continue
try: try:
k = splitted[1].split(" ")[0] fst_str = output.split('\n')[0]
except Exception: except Exception:
log(f"{cmd_line}\n" print(f"Got empty output: {output}")
f"Incorrect output\n" return False
f"Output: {output or '<empty>'}", endpoint) splitted = fst_str.split(": ")
continue if len(splitted) != 2:
if k == node_key: raise ValueError(f"no CID was parsed from command output: \t{fst_str}")
return cid
log(f"Created container {cid} is not stored on {endpoint}, creating another one...", endpoint) print(f"Created container: {splitted[1]}")
return create_container(endpoint, policy, container_creation_retry, wallet_path, config, acl, local, retry + 1)
return splitted[1]
def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config): def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config):
object_name = "" object_name = ""
if wallet_file: cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} object put --file {payload_filepath} --wallet {wallet_file} --config {wallet_config} " \
wallet_file = "--wallet " + wallet_file
if wallet_config:
wallet_config = "--config " + wallet_config
cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} object put --file {payload_filepath} {wallet_file} {wallet_config} " \
f"--cid {container} --no-progress" f"--cid {container} --no-progress"
output, success = execute_cmd(cmd_line) output, success = execute_cmd(cmd_line)
if not success: if not success:
log(f"{cmd_line}\n" print(f" > Object {object_name} has not been uploaded:\n{output}")
f"Object {object_name} has not been uploaded\n"
f"Error: {output}", endpoint)
return False return False
else:
try: try:
# taking second string from command output # taking second string from command output
snd_str = output.split('\n')[1] snd_str = output.split('\n')[1]
except Exception: except Exception:
log(f"{cmd_line}\n" print(f"Got empty input: {output}")
f"Incorrect output\n" return False
f"Output: {output or '<empty>'}", endpoint) splitted = snd_str.split(": ")
return False if len(splitted) != 2:
splitted = snd_str.split(": ") raise Exception(f"no OID was parsed from command output: \t{snd_str}")
if len(splitted) != 2: return splitted[1]
raise Exception(f"no OID was parsed from command output: \t{snd_str}")
return container, endpoint, splitted[1]
def get_object(cid, oid, endpoint, out_filepath, wallet_file, wallet_config): def get_object(cid, oid, endpoint, out_filepath, wallet_file, wallet_config):
if wallet_file: cmd_line = f"frostfs-cli object get -r {endpoint} --cid {cid} --oid {oid} --wallet {wallet_file} --config {wallet_config} " \
wallet_file = "--wallet " + wallet_file
if wallet_config:
wallet_config = "--config " + wallet_config
cmd_line = f"frostfs-cli object get -r {endpoint} --cid {cid} --oid {oid} {wallet_file} {wallet_config} " \
f"--file {out_filepath}" f"--file {out_filepath}"
output, success = execute_cmd(cmd_line) output, success = execute_cmd(cmd_line)
if not success: if not success:
log(f"{cmd_line}\n" print(f" > Failed to get object {output} from container {cid} \r\n"
f"Failed to get object {oid} from container {cid}\n" f" > Error: {output}")
f"Error: {output}", endpoint)
return False return False
return True return True
def search_object_by_id(cid, oid, endpoint, wallet_file, wallet_config, ttl=2): def search_object_by_id(cid, oid, endpoint, wallet_file, wallet_config, ttl=2):
if wallet_file: cmd_line = f"frostfs-cli object search --ttl {ttl} -r {endpoint} --cid {cid} --oid {oid} --wallet {wallet_file} --config {wallet_config} "
wallet_file = "--wallet " + wallet_file
if wallet_config:
wallet_config = "--config " + wallet_config
cmd_line = f"frostfs-cli object search --ttl {ttl} -r {endpoint} --cid {cid} --oid {oid} {wallet_file} {wallet_config} "
output, success = execute_cmd(cmd_line) output, success = execute_cmd(cmd_line)
if not success: if not success:
log(f"{cmd_line}\n" print(f" > Failed to search object {oid} for container {cid} \r\n"
f"Failed to search object {oid} for container {cid}\n" f" > Error: {output}")
f"Error: {output}", endpoint)
return False return False
re_rst = re.search(r'Found (\d+) objects', output) re_rst = re.search(r'Found (\d+) objects', output)
if not re_rst: if not re_rst:
raise Exception("Failed to parse search results") raise Exception("Failed to parce search results")
return re_rst.group(1) return re_rst.group(1)

View file

@ -1,129 +1,104 @@
#!/usr/bin/python3 #!/usr/bin/python3
import argparse import argparse
from itertools import cycle
import json import json
import random
import sys import sys
import tempfile
import time
from argparse import Namespace from argparse import Namespace
from concurrent.futures import ProcessPoolExecutor from concurrent.futures import ProcessPoolExecutor
from helpers.cmd import random_payload from helpers.cmd import random_payload
from helpers.frostfs_cli import create_container, upload_object from helpers.frostfs_cli import create_container, upload_object
ERROR_WRONG_CONTAINERS_COUNT = 1 ERROR_NO_CONTAINERS = 1
ERROR_WRONG_OBJECTS_COUNT = 2 ERROR_NO_OBJECTS = 2
MAX_WORKERS = 50 MAX_WORKERS = 50
DEFAULT_POLICY = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb') parser.add_argument('--size', help='Upload objects size in kb')
parser.add_argument('--containers', help='Number of containers to create') parser.add_argument('--containers', help='Number of containers to create')
parser.add_argument('--retry', default=20, help='Maximum number of retries to create a container')
parser.add_argument('--out', help='JSON file with output') parser.add_argument('--out', help='JSON file with output')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects') parser.add_argument('--preload_obj', help='Number of pre-loaded objects')
parser.add_argument('--wallet', help='Wallet file path') parser.add_argument('--wallet', help='Wallet file path')
parser.add_argument('--config', help='Wallet config file path') parser.add_argument('--config', help='Wallet config file path')
parser.add_argument( parser.add_argument(
"--policy", "--policy",
help=f"Container placement policy. Default is {DEFAULT_POLICY}", help="Container placement policy",
action="append" default="REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
) )
parser.add_argument('--endpoint', help='Nodes addresses separated by comma.') parser.add_argument('--endpoint', help='Node address')
parser.add_argument('--update', help='Save existed containers') parser.add_argument('--update', help='Save existed containers')
parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true') parser.add_argument('--ignore-errors', help='Ignore preset errors')
parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50) parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
parser.add_argument('--sleep', help='Time to sleep between containers creation and objects upload (in seconds), '
'Default = 8', default=8)
parser.add_argument('--local', help='Create containers that store data on provided endpoints. Warning: additional empty containers may be created.', action='store_true')
parser.add_argument('--acl', help='Container ACL. Default is public-read-write.', default='public-read-write')
args: Namespace = parser.parse_args() args: Namespace = parser.parse_args()
print(args) print(args)
def main(): def main():
containers = [] container_list = []
objects_list = [] objects_list = []
payload_filepath = '/tmp/data_file'
endpoints = args.endpoint.split(',') endpoints = args.endpoint.split(',')
if not args.policy:
args.policy = [DEFAULT_POLICY]
container_creation_retry = args.retry
wallet = args.wallet wallet = args.wallet
wallet_config = args.config wallet_config = args.config
workers = int(args.workers) workers = int(args.workers)
objects_per_container = int(args.preload_obj) ignore_errors = True if args.ignore_errors else False
ignore_errors = args.ignore_errors
if args.update: if args.update:
# Open file # Open file
with open(args.out) as f: with open(args.out) as f:
data_json = json.load(f) data_json = json.load(f)
containers = data_json['containers'] container_list = data_json['containers']
containers_count = len(containers)
else: else:
containers_count = int(args.containers) print(f"Create containers: {args.containers}")
print(f"Create containers: {containers_count}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor: with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, args.acl, args.local) containers_runs = {executor.submit(create_container, endpoints[random.randrange(len(endpoints))],
for _, endpoint, policy in args.policy, wallet, wallet_config): _ for _ in range(int(args.containers))}
zip(range(containers_count), cycle(endpoints), cycle(args.policy))]
for run in containers_runs: for run in containers_runs:
container_id = run.result() if run.result():
if container_id: container_list.append(run.result())
containers.append(container_id)
print("Create containers: Completed") print("Create containers: Completed")
print(f" > Containers: {containers}") print(f" > Containers: {container_list}")
if containers_count > 0 and len(containers) != containers_count: if not container_list:
print(f"Containers mismatch in preset: expected {containers_count}, created {len(containers)}") print("No containers to work with")
if not ignore_errors: if not ignore_errors:
sys.exit(ERROR_WRONG_CONTAINERS_COUNT) sys.exit(ERROR_NO_CONTAINERS)
if args.sleep != 0:
print(f"Sleep for {args.sleep} seconds")
time.sleep(args.sleep)
print(f"Upload objects to each container: {args.preload_obj} ") print(f"Upload objects to each container: {args.preload_obj} ")
payload_file = tempfile.NamedTemporaryFile() random_payload(payload_filepath, args.size)
random_payload(payload_file, args.size)
print(" > Create random payload: Completed") print(" > Create random payload: Completed")
total_objects = objects_per_container * containers_count for container in container_list:
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor: print(f" > Upload objects for container {container}")
objects_runs = [executor.submit(upload_object, container, payload_file.name, with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
endpoint, wallet, wallet_config) objects_runs = {executor.submit(upload_object, container, payload_filepath,
for _, container, endpoint in endpoints[random.randrange(len(endpoints))], wallet, wallet_config): _ for _ in range(int(args.preload_obj))}
zip(range(total_objects), cycle(containers), cycle(endpoints))]
for run in objects_runs: for run in objects_runs:
result = run.result() if run.result():
if result: objects_list.append({'container': container, 'object': run.result()})
container_id = result[0] print(f" > Upload objects for container {container}: Completed")
endpoint = result[1]
object_id = result[2]
objects_list.append({'container': container_id, 'object': object_id})
print(f" > Uploaded object {object_id} for container {container_id} via endpoint {endpoint}.")
print("Upload objects to each container: Completed") print("Upload objects to each container: Completed")
if total_objects > 0 and len(objects_list) != total_objects: if int(args.preload_obj) > 0 and not objects_list:
print(f"Objects mismatch in preset: expected {total_objects}, created {len(objects_list)}") print("No objects were uploaded")
if not ignore_errors: if not ignore_errors:
sys.exit(ERROR_WRONG_OBJECTS_COUNT) sys.exit(ERROR_NO_OBJECTS)
data = {'containers': containers, 'objects': objects_list, 'obj_size': args.size + " Kb"} data = {'containers': container_list, 'objects': objects_list, 'obj_size': args.size + " Kb"}
with open(args.out, 'w+') as f: with open(args.out, 'w+') as f:
json.dump(data, f, ensure_ascii=False, indent=2) json.dump(data, f, ensure_ascii=False, indent=2)
print("Result:") print("Result:")
print(f" > Total Containers has been created: {len(containers)}.") print(f" > Total Containers has been created: {len(container_list)}.")
print(f" > Total Objects has been created: {len(objects_list)}.") print(f" > Total Objects has been created: {len(objects_list)}.")

View file

@ -1,132 +1,95 @@
#!/usr/bin/python3 #!/usr/bin/python3
import argparse import argparse
from itertools import cycle
import json import json
import sys import sys
import tempfile
import time
from concurrent.futures import ProcessPoolExecutor from concurrent.futures import ProcessPoolExecutor
from helpers.cmd import random_payload from helpers.cmd import random_payload
from helpers.aws_cli import create_bucket, upload_object from helpers.aws_cli import create_bucket, upload_object
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
ERROR_WRONG_PERCENTAGE = 3
MAX_WORKERS = 50
DEFAULT_LOCATION = ""
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb.') parser.add_argument('--size', help='Upload objects size in kb.')
parser.add_argument('--buckets', help='Number of buckets to create.') parser.add_argument('--buckets', help='Number of buckets to create.')
parser.add_argument('--out', help='JSON file with output.') parser.add_argument('--out', help='JSON file with output.')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects.') parser.add_argument('--preload_obj', help='Number of pre-loaded objects.')
parser.add_argument('--endpoint', help='S3 Gateways addresses separated by comma.') parser.add_argument('--endpoint', help='S3 Gateway address.')
parser.add_argument('--update', help='True/False, False by default. Save existed buckets from target file (--out). ' parser.add_argument('--update', help='True/False, False by default. Save existed buckets from target file (--out). '
'New buckets will not be created.') 'New buckets will not be created.')
parser.add_argument('--location', help=f'AWS location constraint. Default is "{DEFAULT_LOCATION}"', action="append") parser.add_argument('--location', help='AWS location. Will be empty, if has not be declared.', default="")
parser.add_argument('--versioning', help='True/False, False by default. Alias of --buckets_versioned=100') parser.add_argument('--versioning', help='True/False, False by default.')
parser.add_argument('--buckets_versioned', help='Percent of versioned buckets. Default is 0', default=0) parser.add_argument('--ignore-errors', help='Ignore preset errors')
parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50) parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
parser.add_argument('--sleep', help='Time to sleep between buckets creation and objects upload (in seconds), '
'Default = 8', default=8)
parser.add_argument('--acl', help='Bucket ACL. Default is private. Expected values are: private, public-read or public-read-write.', default="private")
args = parser.parse_args() args = parser.parse_args()
print(args) print(args)
def main(): ERROR_NO_BUCKETS = 1
buckets = [] ERROR_NO_OBJECTS = 2
objects_list = [] MAX_WORKERS = 50
ignore_errors = args.ignore_errors
no_verify_ssl = args.no_verify_ssl
endpoints = args.endpoint.split(',') def main():
if not args.location: bucket_list = []
args.location = [DEFAULT_LOCATION] objects_list = []
payload_filepath = '/tmp/data_file'
ignore_errors = True if args.ignore_errors else False
workers = int(args.workers) workers = int(args.workers)
objects_per_bucket = int(args.preload_obj)
if args.update: if args.update:
# Open file # Open file
with open(args.out) as f: with open(args.out) as f:
data_json = json.load(f) data_json = json.load(f)
buckets = data_json['buckets'] bucket_list = data_json['buckets']
buckets_count = len(buckets)
# Get CID list # Get CID list
else: else:
buckets_count = int(args.buckets) print(f"Create buckets: {args.buckets}")
print(f"Create buckets: {buckets_count}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor: with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
if not 0 <= int(args.buckets_versioned) <= 100: buckets_runs = {executor.submit(create_bucket, args.endpoint, args.versioning,
print(f"Percent of versioned buckets must be between 0 and 100: got {args.buckets_versioned}") args.location): _ for _ in range(int(args.buckets))}
if not ignore_errors:
sys.exit(ERROR_WRONG_PERCENTAGE)
if args.versioning == "True":
versioning_per_bucket = [True] * buckets_count
else:
num_versioned_buckets = int((int(args.buckets_versioned) / 100) * buckets_count)
versioning_per_bucket = [True] * num_versioned_buckets + [False] * (buckets_count - num_versioned_buckets)
buckets_runs = [executor.submit(create_bucket, endpoint, versioning_per_bucket[i], location, args.acl, no_verify_ssl)
for i, endpoint, location in
zip(range(buckets_count), cycle(endpoints), cycle(args.location))]
for run in buckets_runs: for run in buckets_runs:
bucket_name = run.result() if run.result() is not None:
if bucket_name: bucket_list.append(run.result())
buckets.append(bucket_name)
print("Create buckets: Completed") print("Create buckets: Completed")
print(f" > Buckets: {buckets}") print(f" > Buckets: {bucket_list}")
if buckets_count > 0 and len(buckets) != buckets_count: if not bucket_list:
print(f"Buckets mismatch in preset: expected {buckets_count}, created {len(buckets)}") print("No buckets to work with")
if not ignore_errors: if not ignore_errors:
sys.exit(ERROR_WRONG_CONTAINERS_COUNT) sys.exit(ERROR_NO_BUCKETS)
if args.sleep != 0: print(f"Upload objects to each bucket: {args.preload_obj} ")
print(f"Sleep for {args.sleep} seconds") random_payload(payload_filepath, args.size)
time.sleep(args.sleep)
print(f"Upload objects to each bucket: {objects_per_bucket} ")
payload_file = tempfile.NamedTemporaryFile()
random_payload(payload_file, args.size)
print(" > Create random payload: Completed") print(" > Create random payload: Completed")
total_objects = objects_per_bucket * buckets_count for bucket in bucket_list:
print(f" > Upload objects for bucket {bucket}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
objects_runs = {executor.submit(upload_object, bucket, payload_filepath,
args.endpoint): _ for _ in range(int(args.preload_obj))}
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor: for run in objects_runs:
objects_runs = [executor.submit(upload_object, bucket, payload_file.name, endpoint, no_verify_ssl) if run.result() is not None:
for _, bucket, endpoint in objects_list.append({'bucket': bucket, 'object': run.result()})
zip(range(total_objects), cycle(buckets), cycle(endpoints))] print(f" > Upload objects for bucket {bucket}: Completed")
for run in objects_runs: print("Upload objects to each bucket: Completed")
result = run.result()
if result:
bucket = result[0]
endpoint = result[1]
object_id = result[2]
objects_list.append({'bucket': bucket, 'object': object_id})
print(f" > Uploaded object {object_id} for bucket {bucket} via endpoint {endpoint}.")
if total_objects > 0 and len(objects_list) != total_objects: if int(args.preload_obj) > 0 and not objects_list:
print(f"Objects mismatch in preset: expected {total_objects}, created {len(objects_list)}") print("No objects were uploaded")
if not ignore_errors: if not ignore_errors:
sys.exit(ERROR_WRONG_OBJECTS_COUNT) sys.exit(ERROR_NO_OBJECTS)
data = {'buckets': buckets, 'objects': objects_list, 'obj_size': args.size + " Kb"} data = {'buckets': bucket_list, 'objects': objects_list, 'obj_size': args.size + " Kb"}
with open(args.out, 'w+') as f: with open(args.out, 'w+') as f:
json.dump(data, f, ensure_ascii=False, indent=2) json.dump(data, f, ensure_ascii=False, indent=2)
print("Result:") print("Result:")
print(f" > Total Buckets has been created: {len(buckets)}.") print(f" > Total Buckets has been created: {len(bucket_list)}.")
print(f" > Total Objects has been created: {len(objects_list)}.") print(f" > Total Objects has been created: {len(objects_list)}.")

View file

@ -1,33 +0,0 @@
#!/usr/bin/python3
import argparse
import json
import http.client
import ssl
parser = argparse.ArgumentParser()
parser.add_argument('--endpoint', help='Endpoint of the S3 gateway')
parser.add_argument('--preset_file', help='JSON file path with s3 preset')
args = parser.parse_args()
def main():
with open(args.preset_file) as f:
preset_text = f.read()
preset = json.loads(preset_text)
conn = http.client.HTTPSConnection(args.endpoint, context = ssl._create_unverified_context())
containers = []
for bucket in preset.get('buckets'):
conn.request("HEAD", f'/{bucket}')
response = conn.getresponse()
containers.append(response.getheader('X-Container-Id'))
response.read()
preset['containers'] = containers
with open(args.preset_file, 'w+') as f:
json.dump(preset, f, ensure_ascii=False, indent=2)
if __name__ == "__main__":
main()

View file

@ -1,14 +1,10 @@
---
# How to execute scenarios # How to execute scenarios
**Note:** you can provide a file with all environment variables (system env variables override values from the file) using ## Common options for gRPC, HTTP, S3 scenarios:
`-e ENV_FILE=.env` (the relative path to that file must start from the working directory):
```shell
$ ./k6 run -e ENV_FILE=.env some-scenario.js
```
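A minimal `.env` file might look like this (a sketch; the variable names are scenario options described below, the values are arbitrary):
```shell
DURATION=600
WRITERS=20
READERS=20
WRITE_OBJ_SIZE=1024
REGISTRY_FILE=registry.bolt
PREGEN_JSON=./grpc.json
```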
## Common options for all scenarios: Scenarios `grpc.js`, `http.js` and `s3.js` support the following options:
Scenarios `grpc.js`, `local.js`, `http.js` and `s3.js` support the following options:
* `DURATION` - duration of scenario in seconds. * `DURATION` - duration of scenario in seconds.
* `READERS` - number of VUs performing read operations. * `READERS` - number of VUs performing read operations.
* `WRITERS` - number of VUs performing write operations. * `WRITERS` - number of VUs performing write operations.
@ -19,18 +15,6 @@ Scenarios `grpc.js`, `local.js`, `http.js` and `s3.js` support the following opt
* `SLEEP_READ` - time interval (in seconds) between reading VU iterations. * `SLEEP_READ` - time interval (in seconds) between reading VU iterations.
* `SELECTION_SIZE` - size of batch to select for deletion (default: 1000). * `SELECTION_SIZE` - size of batch to select for deletion (default: 1000).
* `PAYLOAD_TYPE` - type of an object payload ("random" or "text", default: "random"). * `PAYLOAD_TYPE` - type of an object payload ("random" or "text", default: "random").
* `STREAMING` - if set, the payload is generated on the fly and is not read into memory fully.
* `METRIC_TAGS` - custom metrics tags (format `tag1:value1;tag2:value2`).
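As an illustration, the common options are passed together with the scenario-specific ones (endpoint and preset variables are described in the per-scenario sections; the values here are arbitrary):
```shell
$ ./k6 run -e DURATION=600 -e READERS=20 -e WRITERS=20 -e WRITE_OBJ_SIZE=1024 \
    -e PAYLOAD_TYPE=text -e SLEEP_READ=0.2 -e REGISTRY_FILE=registry.bolt \
    -e PREGEN_JSON=./http.json -e HTTP_ENDPOINTS=host1:8888 scenarios/http.js
```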
Additionally, the profiling extension can be enabled to generate CPU and memory profiles which can be inspected with `go tool pprof file.prof`:
```shell
$ ./k6 run --out profile (...)
```
The profiles are saved in the current directory as `cpu.prof` and `mem.prof`, respectively.
## Common options for the local scenarios:
* `DEBUG_LOGGER` - uses a development logger for the local storage engine to aid debugging (default: false).
Examples of how to use these options are provided below for each scenario. Examples of how to use these options are provided below for each scenario.
@ -59,28 +43,6 @@ Options (in addition to the common options):
* `DIAL_TIMEOUT` - timeout to connect to a node (in seconds). * `DIAL_TIMEOUT` - timeout to connect to a node (in seconds).
* `STREAM_TIMEOUT` - timeout for a single stream message for `PUT`/`GET` operations (in seconds). * `STREAM_TIMEOUT` - timeout for a single stream message for `PUT`/`GET` operations (in seconds).
## Local
1. Create pre-generated containers or objects:
The tests will use all pre-created containers for PUT operations and all pre-created objects for READ operations. There is no dedicated script to preset the Local scenario, so we use the same script as for gRPC:
```shell
$ ./scenarios/preset/preset_grpc.py --size 1024 --containers 1 --out grpc.json --endpoint host1:8080 --preload_obj 500
```
2. Execute scenario with options:
```shell
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e DELETERS=30 -e DELETE_AGE=10 -e REGISTRY_FILE=registry.bolt -e CONFIG_FILE=/path/to/config.yaml -e CONFIG_DIR=/path/to/dir/ -e PREGEN_JSON=./grpc.json scenarios/local.js
```
Options (in addition to the common options):
* `CONFIG_FILE` - path to the local configuration file used for the storage node. Only the storage configuration section is used.
* `CONFIG_DIR` - path to the folder with local configuration files used for the storage node.
* `DELETERS` - number of VUs performing delete operations (using deleters requires that options `DELETE_AGE` and `REGISTRY_FILE` are specified as well).
* `DELETE_AGE` - age of object in seconds before which it can not be deleted. This parameter can be used to control how many objects we have in the system under load.
* `MAX_TOTAL_SIZE_GB` - if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved.
## HTTP ## HTTP
1. Create pre-generated containers or objects: 1. Create pre-generated containers or objects:
@ -125,7 +87,7 @@ The tests will use all pre-created buckets for PUT operations and all pre-create
$ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 500 --location load-1-4 $ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 500 --location load-1-4
``` ```
* '--location' - specify the name of the container policy (from the policy.json file). It's important to run 'aws configure' each time the policy file is changed, to pick up the latest policies. * '--location' - specify the name of the container policy (from the policy.json file). It's important to run 'aws configure' each time the policy file is changed, to pick up the latest policies.
* '--buckets_versioned' - specify the percentage of versioned buckets from the total number of created buckets. Default is 0 (see the example below).
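For example, to make half of the pre-created buckets versioned (host, sizes and counts here are illustrative):
```shell
$ ./scenarios/preset/preset_s3.py --size 1024 --buckets 10 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 100 --buckets_versioned 50
```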
3. Execute scenario with options: 3. Execute scenario with options:
```shell ```shell
@ -138,93 +100,6 @@ Options (in addition to the common options):
* `DELETE_AGE` - age of object in seconds before which it can not be deleted. This parameter can be used to control how many objects we have in the system under load. * `DELETE_AGE` - age of object in seconds before which it can not be deleted. This parameter can be used to control how many objects we have in the system under load.
* `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations. * `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations.
* `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation. * `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
* `OBJ_NAME_LENGTH` - if specified, the object name will be generated as a random ASCII string of this length.
* `DIR_HEIGHT`, `DIR_WIDTH` - if both specified, object name will consist of `DIR_HEIGHT` directories, each of which can have `DIR_WIDTH` subdirectories, for example for `DIR_HEIGHT = 3, DIR_WIDTH = 100`, object names will be `/dir{1...100}/dir{1...100}/dir{1...100}/{uuid || OBJ_NAME}` (see the example run below).
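An example run using these options (a sketch with arbitrary values; with `DIR_HEIGHT=3` and `DIR_WIDTH=100` the generated keys look like `dir17/dir4/dir81/<uuid>`):
```shell
$ ./k6 run -e DURATION=60 -e WRITERS=10 -e WRITE_OBJ_SIZE=1024 -e DIR_HEIGHT=3 -e DIR_WIDTH=100 \
    -e S3_ENDPOINTS=host1:8084 -e PREGEN_JSON=s3_1024kb.json scenarios/s3.js
```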
## S3 Multipart
Performs the multipart upload operation: large objects are broken up into parts so they can be transferred in parallel.
```shell
$ ./k6 run -e DURATION=600 \
-e WRITERS=400 -e WRITERS_MULTIPART=10 \
-e WRITE_OBJ_SIZE=524288 -e WRITE_OBJ_PART_SIZE=10240 \
-e S3_ENDPOINTS=10.78.70.142:8084,10.78.70.143:8084,10.78.70.144:8084,10.78.70.145:8084 \
-e PREGEN_JSON=/home/service/s3_4kb.json \
scenarios/s3_multipart.js
```
Options:
* `DURATION` - duration of scenario in seconds.
* `REGISTRY_FILE` - if set, all produced objects will be stored in database for subsequent verification. Database file name will be set to the value of `REGISTRY_FILE`.
* `PREGEN_JSON` - path to json file with pre-generated containers.
* `SLEEP_WRITE` - time interval (in seconds) between writing VU iterations.
* `PAYLOAD_TYPE` - type of an object payload ("random" or "text", default: "random").
* `S3_ENDPOINTS` - endpoints of S3 gateways in the format `host:port`. To specify multiple endpoints, separate them with commas.
* `WRITERS` - number of VUs performing the payload upload operation.
* `WRITERS_MULTIPART` - number of goroutines that will upload parts in parallel.
* `WRITE_OBJ_SIZE` - object size in kb for write (PUT) operations.
* `WRITE_OBJ_PART_SIZE` - part size in kb for multipart upload operations (must be greater than or equal to 5 MB).
## S3 Local
1. Follow steps 1. and 2. from the normal S3 scenario in order to obtain credentials and a preset file with the information about the buckets and objects that were pre-created.
2. Assuming the preset file was named `pregen.json`, we need to populate the bucket-to-container mapping before running the local S3 scenario:
**WARNING**: Be aware that this command will overwrite the `containers` list field in `pregen.json` file. Make a backup if needed beforehand.
```shell
$ ./scenarios/preset/resolve_containers_in_preset.py --endpoint s3host:8080 --preset_file pregen.json
```
After this, the `pregen.json` file will contain a `containers` list field the same length as `buckets`, which is the mapping of bucket name to container ID in the order they appear.
3. Execute the scenario with the desired options. For example:
```shell
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e CONFIG_FILE=/path/to/node/config.yml -e CONFIG_DIR=/path/to/dir/ -e PREGEN_JSON=pregen.json scenarios/s3local.js
```
Note that the `s3local` scenario currently does not support deleters.
Options (in addition to the common options):
* `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
* `MAX_TOTAL_SIZE_GB` - if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved.
## Export metrics
To export metrics to Prometheus (also Grafana and Victoria Metrics support Prometheus format), you need to run `k6` with an option `-o experimental-prometheus-rw` and
an environment variable `K6_PROMETHEUS_RW_SERVER_URL` whose value corresponds to the URL for the remote write endpoint.
To specify percentiles for trend metrics, use an environment variable `K6_PROMETHEUS_RW_TREND_STATS`.
See [k6 docs](https://k6.io/docs/results-output/real-time/prometheus-remote-write/) for a list of all possible options.
To distinguish metrics from different loaders, use the `METRIC_TAGS` option. These tags do not apply to the builtin `k6` metrics.
Example:
```bash
K6_PROMETHEUS_RW_SERVER_URL=http://host:8428/api/v1/write \
K6_PROMETHEUS_RW_TREND_STATS="p(95),p(99),min,max" \
./k6 run ... -o experimental-prometheus-rw -e METRIC_TAGS="instance:server1;run:run1" scenario.js
```
## Grafana annotations
There is no option to export Grafana annotations, but it can easily be done with `curl` and Grafana's annotations API.
Example:
```shell
curl --request POST \
--url https://user:password@grafana.host/api/annotations \
--header 'Content-Type: application/json' \
--data '{
"dashboardUID": "YsVWNpMIk",
"time": 1706533045014,
"timeEnd": 1706533085100,
"tags": [
"tag1",
"tag2"
],
"text": "Test annotation"
}'
```
See [Grafana docs](https://grafana.com/docs/grafana/latest/developers/http_api/annotations/) for details.
## Verify ## Verify

View file

@ -1,226 +1,172 @@
import {sleep} from 'k6'; import datagen from 'k6/x/frostfs/datagen';
import {SharedArray} from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry'; import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3'; import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats'; import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import {generateS3Key} from './libs/keygen.js'; const obj_list = new SharedArray('obj_list', function () {
import {parseEnv} from './libs/env-parser.js'; return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
import {textSummary} from './libs/k6-summary-0.0.2.js'; });
import {newGenerator} from './libs/datagen.js';
parseEnv(); const bucket_list = new SharedArray('bucket_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
const obj_list = new SharedArray( });
'obj_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
const bucket_list = new SharedArray(
'bucket_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).buckets; });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json'; const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl : no_verify_ssl
}
// Select random S3 endpoint for current VU // Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(','); const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint = const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)]; const s3_client = s3.connect(`http://${s3_endpoint}`);
const s3_client = s3.connect(s3_endpoint, connection_args); const log = logging.new().withField("endpoint", s3_endpoint);
const log = logging.new().withField('endpoint', s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE; const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION; const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) { const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
stats.setTags(__ENV.METRIC_TAGS) let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
} }
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10; const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE), __ENV.PAYLOAD_TYPE || "");
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : read_age,
})
}
const scenarios = {}; const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0'); const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) { if (write_vu_count > 0) {
scenarios.write = { scenarios.write = {
executor : 'constant-vus', executor: 'constant-vus',
vus : write_vu_count, vus: write_vu_count,
duration : `${duration}s`, duration: `${duration}s`,
exec : 'obj_write', exec: 'obj_write',
gracefulStop : '5s', gracefulStop: '5s',
}; };
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_exit_on_null = write_vu_count == 0;
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
obj_to_delete_selector =
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : delete_age,
});
} }
const read_vu_count = parseInt(__ENV.READERS || '0'); const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) { if (read_vu_count > 0) {
scenarios.read = { scenarios.read = {
executor : 'constant-vus', executor: 'constant-vus',
vus : read_vu_count, vus: read_vu_count,
duration : `${duration}s`, duration: `${duration}s`,
exec : 'obj_read', exec: 'obj_read',
gracefulStop : '5s', gracefulStop: '5s',
}; };
} }
const delete_vu_count = parseInt(__ENV.DELETERS || '0'); const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) { if (delete_vu_count > 0) {
if (!obj_to_delete_selector) { if (!obj_to_delete_selector) {
throw 'Positive DELETE worker number without a proper object selector'; throw 'Positive DELETE worker number without a proper object selector';
} }
scenarios.delete = { scenarios.delete = {
executor : 'constant-vus', executor: 'constant-vus',
vus : delete_vu_count, vus: delete_vu_count,
duration : `${duration}s`, duration: `${duration}s`,
exec : 'obj_delete', exec: 'obj_delete',
gracefulStop : '5s', gracefulStop: '5s',
}; };
} }
export const options = { export const options = {
scenarios, scenarios,
setupTimeout : '5s', setupTimeout: '5s',
}; };
export function setup() { export function setup() {
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count; const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`); console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`); console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`); console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`); console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`); console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`); console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`); console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
if (delete_vu_count > 0){
obj_to_delete_selector.sync.add(delete_vu_count)
}
} }
export function teardown(data) { export function teardown(data) {
if (obj_registry) { if (obj_registry) {
obj_registry.close(); obj_registry.close();
} }
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
} }
export function handleSummary(data) { export function handleSummary(data) {
return { return {
'stdout' : textSummary(data, {indent : ' ', enableColors : false}), 'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json] : JSON.stringify(data), [summary_json]: JSON.stringify(data),
}; };
} }
export function obj_write() { export function obj_write() {
if (__ENV.SLEEP_WRITE) { if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE); sleep(__ENV.SLEEP_WRITE);
} }
const key = generateS3Key(); const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)]; const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload(); const { payload, hash } = generator.genPayload(registry_enabled);
const resp = s3_client.put(bucket, key, payload); const resp = s3_client.put(bucket, key, payload);
if (!resp.success) { if (!resp.success) {
log.withFields({bucket : bucket, key : key}).error(resp.error); log.withFields({bucket: bucket, key: key}).error(resp.error);
return; return;
} }
if (obj_registry) { if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash()); obj_registry.addObject("", "", bucket, key, hash);
} }
} }
export function obj_read() { export function obj_read() {
if (__ENV.SLEEP_READ) { if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ); sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
} }
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) { if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key}) log.withFields({bucket: obj.bucket, key: obj.object}).error(resp.error);
.error(resp.error);
} }
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket : obj.bucket, key : obj.object}).error(resp.error);
}
} }
export function obj_delete() { export function obj_delete() {
if (__ENV.SLEEP_DELETE) { if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE); sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
if (obj_to_delete_exit_on_null) {
obj_to_delete_selector.sync.done()
obj_to_delete_selector.sync.wait()
exec.test.abort("No more objects to select");
} }
return;
}
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key); const obj = obj_to_delete_selector.nextObject();
if (!resp.success) { if (!obj) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, op : 'DELETE'}) return;
.error(resp.error); }
return;
}
obj_registry.deleteObject(obj.id); const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key, op: "DELETE"}).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}
export function uuidv4() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
let r = Math.random() * 16 | 0, v = c === 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
} }

View file

@ -1,239 +0,0 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const bucket_list = new SharedArray('bucket_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
// Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl: no_verify_ssl
};
const s3_client = s3.connect(s3_endpoint, connection_args);
const log = logging.new().withField('endpoint', s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
}
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
}
const scenarios = {};
const time_unit = __ENV.TIME_UNIT || '1s';
const pre_alloc_write_vus = parseInt(__ENV.PRE_ALLOC_WRITERS || '0');
const max_write_vus = parseInt(__ENV.MAX_WRITERS || pre_alloc_write_vus);
const write_rate = parseInt(__ENV.WRITE_RATE || '0');
const generator = newGenerator(write_rate > 0);
if (write_rate > 0) {
scenarios.write = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_write_vus,
rate: write_rate,
timeUnit: time_unit,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const pre_alloc_read_vus = parseInt(__ENV.PRE_ALLOC_READERS || '0');
const max_read_vus = parseInt(__ENV.MAX_READERS || pre_alloc_read_vus);
const read_rate = parseInt(__ENV.READ_RATE || '0');
if (read_rate > 0) {
scenarios.read = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_read_vus,
rate: read_rate,
timeUnit: time_unit,
exec: 'obj_read',
gracefulStop: '5s',
};
}
const pre_alloc_delete_vus = parseInt(__ENV.PRE_ALLOC_DELETERS || '0');
const max_delete_vus = parseInt(__ENV.MAX_DELETERS || pre_alloc_write_vus);
const delete_rate = parseInt(__ENV.DELETE_RATE || '0');
if (delete_rate > 0) {
if (!obj_to_delete_selector) {
throw new Error(
'Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_delete_vus,
maxVUs: max_delete_vus,
rate: delete_rate,
timeUnit: time_unit,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_pre_allocated_vu_count =
pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Pre allocated reading VUs: ${pre_alloc_read_vus}`);
console.log(`Pre allocated writing VUs: ${pre_alloc_write_vus}`);
console.log(`Pre allocated deleting VUs: ${pre_alloc_delete_vus}`);
console.log(`Total pre allocated VUs: ${total_pre_allocated_vu_count}`);
console.log(`Max reading VUs: ${max_read_vus}`);
console.log(`Max writing VUs: ${max_write_vus}`);
console.log(`Max deleting VUs: ${max_delete_vus}`);
console.log(`Total max VUs: ${total_max_vu_count}`);
console.log(`Time unit: ${time_unit}`);
console.log(`Read rate: ${read_rate}`);
console.log(`Writing rate: ${write_rate}`);
console.log(`Delete rate: ${delete_rate}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key})
.error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket: obj.bucket, key: obj.object}).error(resp.error);
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key, op: 'DELETE'})
.error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}

View file

@ -1,233 +0,0 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
parseEnv();
const obj_list = new SharedArray(
'obj_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
const bucket_list = new SharedArray(
'bucket_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).buckets; });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl : no_verify_ssl
}
// Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const s3_client = s3.connect(s3_endpoint, connection_args);
const log = logging.new().withField('endpoint', s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : read_age,
})
}
const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor : 'constant-vus',
vus : write_vu_count,
duration : `${duration}s`,
exec : 'obj_write',
gracefulStop : '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor : 'constant-vus',
vus : read_vu_count,
duration : `${duration}s`,
exec : 'obj_read',
gracefulStop : '5s',
};
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled ) {
obj_to_delete_exit_on_null = (write_vu_count == 0) && (read_vu_count == 0)
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
obj_to_delete_selector =
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'read',
age : delete_age,
});
}
const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
if (!obj_to_delete_selector) {
throw 'Positive DELETE worker number without a proper object selector';
}
scenarios.delete = {
executor : 'constant-vus',
vus : delete_vu_count,
duration : `${duration}s`,
exec : 'obj_delete',
gracefulStop : '5s',
};
}
export const options = {
scenarios,
setupTimeout : '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout' : textSummary(data, {indent : ' ', enableColors : false}),
[summary_json] : JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
log.withFields({bucket : bucket, key : key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj ) {
return;
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, status: obj.status, op: `READ`})
.error(resp.error);
} else {
obj_registry.setObjectStatus(obj.id, obj.status, 'read');
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket : obj.bucket, key : obj.object}).error(resp.error);
} else {
obj_registry.setObjectStatus(obj.id, obj.status, 'read');
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
delete_object(obj)
}
export function delete_object(obj) {
if (!obj) {
if (obj_to_delete_exit_on_null) {
exec.test.abort("No more objects to select");
}
return;
}
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, op : 'DELETE'})
.error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}

View file

@ -1,119 +0,0 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
parseEnv();
const bucket_list = new SharedArray('bucket_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
});
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
// Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl: no_verify_ssl
};
const s3_client = s3.connect(s3_endpoint, connection_args);
const log = logging.new().withField('endpoint', s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
if (write_vu_count < 1) {
throw 'number of VUs (env WRITERS) performing write operations should be greater than 0';
}
const write_multipart_vu_count = parseInt(__ENV.WRITERS_MULTIPART || '0');
if (write_multipart_vu_count < 1) {
throw 'number of parts (env WRITERS_MULTIPART) to upload in parallel should be greater than 0';
}
const generator =
newGenerator(write_vu_count > 0 || write_multipart_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write_multipart = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write_multipart',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_vu_count = write_vu_count * write_multipart_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Writing multipart VUs: ${write_multipart_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
[summary_json]: JSON.stringify(data),
};
}
const write_multipart_part_size =
1024 * parseInt(__ENV.WRITE_OBJ_PART_SIZE || '0')
if (write_multipart_part_size < 5 * 1024 * 1024) {
throw 'part size (env WRITE_OBJ_PART_SIZE * 1024) must be greater than (5 MB)';
}
export function obj_write_multipart() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
const resp = s3_client.multipart(
bucket, key, write_multipart_part_size, write_multipart_vu_count,
payload);
if (!resp.success) {
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
}

View file

@ -1,173 +0,0 @@
import {SharedArray} from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3local from 'k6/x/frostfs/s3local';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray('container_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
const bucket_list = new SharedArray('bucket_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
});
function bucket_mapping() {
if (container_list.length != bucket_list.length) {
throw 'The number of containers and buckets in the preset file must be the same.';
}
let mapping = {};
for (let i = 0; i < container_list.length; ++i) {
mapping[bucket_list[i]] = container_list[i];
}
return mapping;
}
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const config_file = __ENV.CONFIG_FILE;
const config_dir = __ENV.CONFIG_DIR;
const max_total_size_gb =
__ENV.MAX_TOTAL_SIZE_GB ? parseInt(__ENV.MAX_TOTAL_SIZE_GB) : 0;
const s3_client = s3local.connect(
config_file, config_dir, {
'debug_logger': __ENV.DEBUG_LOGGER || 'false',
},
bucket_mapping(), max_total_size_gb);
const log = logging.new().withFields(
{'config_file': config_file, 'config_dir': config_dir});
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
})
}
const duration = __ENV.DURATION;
const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
if (resp.abort) {
exec.test.abort(resp.error);
}
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
}
export function obj_read() {
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key})
.error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket: obj.bucket, key: obj.object}).error(resp.error);
}
}

View file

@ -1,21 +1,15 @@
import { sleep } from 'k6';
import { Counter } from 'k6/metrics';
import logging from 'k6/x/frostfs/logging';
import native from 'k6/x/frostfs/native'; import native from 'k6/x/frostfs/native';
import registry from 'k6/x/frostfs/registry'; import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3'; import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats'; import { sleep } from 'k6';
import { Counter } from 'k6/metrics';
import { parseEnv } from './libs/env-parser.js';
import { textSummary } from './libs/k6-summary-0.0.2.js'; import { textSummary } from './libs/k6-summary-0.0.2.js';
parseEnv();
const obj_registry = registry.open(__ENV.REGISTRY_FILE); const obj_registry = registry.open(__ENV.REGISTRY_FILE);
// Time limit (in seconds) for the run // Time limit (in seconds) for the run
const time_limit = __ENV.TIME_LIMIT || '60'; const time_limit = __ENV.TIME_LIMIT || "60";
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json'; const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Number of objects in each status. These counters are cumulative in a // Number of objects in each status. These counters are cumulative in a
// sense that they reflect total number of objects in the registry, not just // sense that they reflect total number of objects in the registry, not just
@ -23,147 +17,116 @@ const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
// This allows to run this scenario multiple times and collect overall // This allows to run this scenario multiple times and collect overall
// statistics in the final run. // statistics in the final run.
const obj_counters = { const obj_counters = {
verified: new Counter('verified_obj'), verified: new Counter('verified_obj'),
skipped: new Counter('skipped_obj'), skipped: new Counter('skipped_obj'),
invalid: new Counter('invalid_obj'), invalid: new Counter('invalid_obj'),
}; };
let log = logging.new();
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
// Connect to random gRPC endpoint // Connect to random gRPC endpoint
let grpc_client = undefined; let grpc_client = undefined;
if (__ENV.GRPC_ENDPOINTS) { if (__ENV.GRPC_ENDPOINTS) {
const grpcEndpoints = __ENV.GRPC_ENDPOINTS.split(','); const grpcEndpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpcEndpoint = const grpcEndpoint = grpcEndpoints[Math.floor(Math.random() * grpcEndpoints.length)];
grpcEndpoints[Math.floor(Math.random() * grpcEndpoints.length)]; grpc_client = native.connect(grpcEndpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 0, __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 0);
log = log.withField('endpoint', grpcEndpoint);
grpc_client = native.connect(
grpcEndpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 0,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 0,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
} }
// Connect to random S3 endpoint // Connect to random S3 endpoint
let s3_client = undefined; let s3_client = undefined;
if (__ENV.S3_ENDPOINTS) { if (__ENV.S3_ENDPOINTS) {
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true'; const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const connection_args = { no_verify_ssl: no_verify_ssl }; const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const s3_endpoints = __ENV.S3_ENDPOINTS.split(','); s3_client = s3.connect(`http://${s3_endpoint}`);
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
log = log.withField('endpoint', s3_endpoint);
s3_client = s3.connect(s3_endpoint, connection_args);
} }
// We will attempt to verify every object in "created" status. The scenario will // We will attempt to verify every object in "created" status. The scenario will execute
// execute as many iterations as there are objects. Each object will have 3 // as many iterations as there are objects. Each object will have 3 retries to be verified
// retries to be verified
const obj_to_verify_selector = registry.getSelector( const obj_to_verify_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_verify', __ENV.REGISTRY_FILE,
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, { "obj_to_verify",
status: 'created', __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
}); {
status: "created",
}
);
const obj_to_verify_count = obj_to_verify_selector.count(); const obj_to_verify_count = obj_to_verify_selector.count();
// Execute at least one iteration (executor shared-iterations can't run 0 // Execute at least one iteration (executor shared-iterations can't run 0 iterations)
// iterations)
const iterations = Math.max(1, obj_to_verify_count); const iterations = Math.max(1, obj_to_verify_count);
// Executor shared-iterations requires number of iterations to be larger than // Executor shared-iterations requires number of iterations to be larger than number of VUs
// number of VUs
const vus = Math.min(__ENV.CLIENTS, iterations); const vus = Math.min(__ENV.CLIENTS, iterations);
const scenarios = { const scenarios = {
verify: { verify: {
executor: 'shared-iterations', executor: 'shared-iterations',
vus, vus,
iterations, iterations,
maxDuration: `${time_limit}s`, maxDuration: `${time_limit}s`,
exec: 'obj_verify', exec: 'obj_verify',
gracefulStop: '5s', gracefulStop: '5s',
} }
}; };
export const options = { export const options = {
scenarios, scenarios,
setupTimeout: '5s', setupTimeout: '5s',
}; };
export function setup() { export function setup() {
// Populate counters with initial values // Populate counters with initial values
for (const [status, counter] of Object.entries(obj_counters)) { for (const [status, counter] of Object.entries(obj_counters)) {
const obj_selector = registry.getSelector( const obj_selector = registry.getSelector(
__ENV.REGISTRY_FILE, status, __ENV.REGISTRY_FILE,
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, { status }); status,
counter.add(obj_selector.count()); __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
} { status });
counter.add(obj_selector.count());
}
} }
export function handleSummary(data) { export function handleSummary(data) {
return { return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }), 'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data), [summary_json]: JSON.stringify(data),
}; };
} }
export function obj_verify() { export function obj_verify() {
if (obj_to_verify_count == 0) { if (__ENV.SLEEP) {
log.info('Nothing to verify'); sleep(__ENV.SLEEP);
return; }
}
if (__ENV.SLEEP) { const obj = obj_to_verify_selector.nextObject();
sleep(__ENV.SLEEP); if (!obj) {
} console.log("All objects have been verified");
return;
}
const obj = obj_to_verify_selector.nextObject(); const obj_status = verify_object_with_retries(obj, 3);
if (!obj) { obj_counters[obj_status].add(1);
log.info('All objects have been verified'); obj_registry.setObjectStatus(obj.id, obj_status);
return;
}
const obj_status = verify_object_with_retries(obj, 3);
obj_counters[obj_status].add(1);
obj_registry.setObjectStatus(obj.id, obj.status, obj_status);
} }
function verify_object_with_retries(obj, attempts) { function verify_object_with_retries(obj, attempts) {
for (let i = 0; i < attempts; i++) { for (let i = 0; i < attempts; i++) {
let result; let result;
// Different name is required. if (obj.c_id && obj.o_id) {
// ReferenceError: Cannot access a variable before initialization. result = grpc_client.verifyHash(obj.c_id, obj.o_id, obj.payload_hash);
let lg = log; } else if (obj.s3_bucket && obj.s3_key) {
if (obj.c_id && obj.o_id) { result = s3_client.verifyHash(obj.s3_bucket, obj.s3_key, obj.payload_hash);
lg = lg.withFields({ cid: obj.c_id, oid: obj.o_id }); } else {
result = grpc_client.verifyHash(obj.c_id, obj.o_id, obj.payload_hash); console.log(`Object id=${obj.id} cannot be verified with supported protocols`);
} else if (obj.s3_bucket && obj.s3_key) { return "skipped";
lg = lg.withFields({ bucket: obj.s3_bucket, key: obj.s3_key }); }
result =
s3_client.verifyHash(obj.s3_bucket, obj.s3_key, obj.payload_hash); if (result.success) {
} else { return "verified";
lg.withFields({ } else if (result.error == "hash mismatch") {
cid: obj.c_id, return "invalid";
oid: obj.o_id, }
bucket: obj.s3_bucket,
key: obj.s3_key // Unless we explicitly saw that there was a hash mismatch, then we will retry after a delay
}).warn(`Object cannot be verified with supported protocols`); console.log(`Verify error on ${obj.id}: ${result.error}. Object will be re-tried`);
return 'skipped'; sleep(__ENV.SLEEP);
} }
if (result.success) { return "invalid";
return 'verified';
} else if (result.error == 'hash mismatch') {
return 'invalid';
}
// Unless we explicitly saw that there was a hash mismatch, then we will
// retry after a delay
lg.error(`Verify error: ${result.error}. Object will be re-tried`);
sleep(__ENV.SLEEP);
}
return 'invalid';
} }