Compare commits

master..feat/metrics

No commits in common. "master" and "feat/metrics" have entirely different histories.

57 changed files with 1931 additions and 2723 deletions

.forgejo/CODEOWNERS Normal file

@@ -0,0 +1 @@
* @TrueCloudLab/storage-core @TrueCloudLab/storage-services

@@ -13,7 +13,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.21'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v2

@@ -11,21 +11,20 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
go-version: '1.21'
cache: true
- name: Install linters
run: make lint-install
- name: Run linters
run: make lint
- name: golangci-lint
uses: https://github.com/golangci/golangci-lint-action@v3
with:
version: latest
tests:
name: Tests
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.22', '1.23' ]
go_versions: [ '1.20', '1.21' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@@ -48,7 +47,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.21'
cache: true
- name: Run tests

.gitattributes vendored

@@ -1 +0,0 @@
/go.sum -diff

@@ -1,3 +0,0 @@
.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers @TrueCloudLab/storage-services-committers @TrueCloudLab/storage-services-developers
.forgejo/.* @potyarkin
Makefile @potyarkin

@@ -3,8 +3,8 @@
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/issues) and
[pull requests](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/pulls) for existing
- Check the open [issues](https://github.com/TrueCloudLab/xk6-frostfs/issues) and
[pull requests](https://github.com/TrueCloudLab/xk6-frostfs/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
@@ -27,20 +27,19 @@ Start by forking the `xk6-frostfs` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:
### Set up your repository
Fork [xk6-frostfs upstream](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/fork) source
### Set up your GitHub Repository
Fork [xk6-frostfs upstream](https://github.com/TrueCloudLab/xk6-frostfs/fork) source
repository to your own personal repository. Copy the URL of your fork (you will
need it for the `git clone` command below).
```sh
$ git clone https://git.frostfs.info/TrueCloudLab/xk6-frostfs
$ git clone https://github.com/TrueCloudLab/xk6-frostfs
```
### Set up git remote as ``upstream``
```sh
$ cd xk6-frostfs
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/xk6-frostfs
$ git remote add upstream https://github.com/TrueCloudLab/xk6-frostfs
$ git fetch upstream
$ git merge upstream/master
...
@@ -90,7 +89,7 @@ $ git push origin feature/123-something_awesome
```
### Create a Pull Request
Pull requests can be created via git.frostfs.info. Refer to [this
Pull requests can be created via GitHub. Refer to [this
document](https://help.github.com/articles/creating-a-pull-request/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.

@@ -3,15 +3,10 @@
# Common variables
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
GO_VERSION ?= 1.19
LINT_VERSION ?= 1.49.0
BINDIR = bin
OUTPUT_LINT_DIR ?= $(abspath $(BINDIR))/linters
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache
# Binaries to build
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
BINS = $(addprefix $(BINDIR)/, $(CMDS))
@@ -69,22 +64,7 @@ format:
# Run linters
lint:
@if [ ! -d "$(LINT_DIR)" ]; then \
make lint-install; \
fi
$(LINT_DIR)/golangci-lint run --timeout=5m
# Install linters
lint-install:
@rm -rf $(OUTPUT_LINT_DIR)
@mkdir -p $(OUTPUT_LINT_DIR)
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
@golangci-lint --timeout=5m run
# Run linters in Docker
docker/lint:

@@ -1,5 +1,5 @@
<p align="center">
<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
</p>
<p align="center">
<a href="https://go.k6.io/k6">k6</a> extension to test and benchmark FrostFS related protocols.
@@ -48,16 +48,15 @@ Create native client with `connect` method. Arguments:
- dial timeout in seconds (0 for the default value)
- stream timeout in seconds (0 for the default value)
- generate object header on the client side (for big objects - split locally too)
- max size for generated object header on the client side (for big objects - the size that the object is split into)
```js
import native from 'k6/x/frostfs/native';
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false)
```
### Methods
- `putContainer(params)`. The `params` is a dictionary (e.g.
`{placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
`{acl:'public-read-write',placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
Returns dictionary with `success`
boolean flag, `container_id` string, and `error` string.
- `setBufferSize(size)`. Sets internal buffer size for data upload and
@@ -107,12 +106,11 @@ const s3_cli = s3.connect("https://s3.frostfs.devenv:8080")
You can also provide additional options:
```js
import s3 from 'k6/x/frostfs/s3';
const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s', 'aws_profile': 'metal'})
const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s'})
```
* `no_verify_ssl` - Bool. If `true` - skip verifying the s3 certificate chain and host name (useful if s3 uses self-signed certificates)
* `timeout` - Duration. Set timeout for requests (in http client). If omitted or zero - timeout is infinite.
* `aws_profile` - String. Use custom profile credentials from `$HOME/.aws/credentials` file. If omitted or empty - use default profile.
### Methods
- `createBucket(bucket, params)`. Returns dictionary with `success` boolean flag
@@ -187,25 +185,6 @@ Flags:
-v, --version version for registry-exporter
```
## Import pregen into registry db
You can import pregenerated JSON files into the registry Bolt DB with `frostfs-xk6-registry import`. Usage examples are shown in the help output:
```shell
$ ./bin/frostfs-xk6-registry import -h
Import objects into registry from pregenerated files
Usage:
xk6-registry import [flags]
Examples:
xk6-registry import registry.bolt preset.json
xk6-registry import registry.bolt preset.json another_preset.json
Flags:
-h, --help help for import
```
# License
- [GNU General Public License v3.0](LICENSE)

@@ -78,7 +78,7 @@ func rootCmdRun(cmd *cobra.Command, args []string) error {
}
objRegistry := registry.NewObjRegistry(cmd.Context(), args[0])
objSelector := registry.NewObjSelector(objRegistry, 0, registry.SelectorAwaiting, &registry.ObjFilter{
objSelector := registry.NewObjSelector(objRegistry, 0, &registry.ObjFilter{
Status: status,
Age: age,
})

@@ -1,55 +0,0 @@
package importer
import (
"encoding/json"
"os"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
)
type PreGenObj struct {
Bucket string `json:"bucket"`
Object string `json:"object"`
Container string `json:"container"`
}
type PreGenerateInfo struct {
Buckets []string `json:"buckets"`
Containers []string `json:"containers"`
Objects []PreGenObj `json:"objects"`
ObjSize string `json:"obj_size"`
}
// ImportJSONPreGen writes objects from pregenerated JSON file
// to the registry.
// Note that ImportJSONPreGen does not check whether an object already
// exists in the registry, so importing the same file twice leaves two
// entries representing the same object.
func ImportJSONPreGen(o *registry.ObjRegistry, filename string) error {
f, err := os.ReadFile(filename)
if err != nil {
return err
}
var pregenInfo PreGenerateInfo
err = json.Unmarshal(f, &pregenInfo)
if err != nil {
return err
}
// AddObject uses DB.Batch to combine concurrent Batch calls
// into a single Bolt transaction. DB.Batch is limited by
// DB.MaxBatchDelay which may affect performance.
for _, obj := range pregenInfo.Objects {
if obj.Bucket != "" {
err = o.AddObject("", "", obj.Bucket, obj.Object, "")
} else {
err = o.AddObject(obj.Container, obj.Object, "", "", "")
}
if err != nil {
return err
}
}
return nil
}
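Since the importer exists only on the master side of this comparison, here is a minimal sketch of driving it programmatically instead of through the CLI (file paths are placeholders; the two calls mirror what runCmd does):

```go
package main

import (
	"context"
	"log"

	"git.frostfs.info/TrueCloudLab/xk6-frostfs/cmd/xk6-registry/importer"
	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
)

func main() {
	// Open (or create) the registry Bolt DB and load one pregenerated preset.
	// ImportJSONPreGen does not deduplicate, so importing the same preset
	// twice leaves two entries per object.
	objRegistry := registry.NewObjRegistry(context.Background(), "registry.bolt")
	if err := importer.ImportJSONPreGen(objRegistry, "preset.json"); err != nil {
		log.Fatal(err)
	}
}
```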

@@ -1,27 +0,0 @@
package importer
import (
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
"github.com/spf13/cobra"
)
// Cmd represents the import command.
var Cmd = &cobra.Command{
Use: "import",
Short: "Import objects into registry",
Long: "Import objects into registry from pregenerated files",
Example: `xk6-registry import registry.bolt preset.json
xk6-registry import registry.bolt preset.json another_preset.json`,
RunE: runCmd,
Args: cobra.MinimumNArgs(2),
}
func runCmd(cmd *cobra.Command, args []string) error {
objRegistry := registry.NewObjRegistry(cmd.Context(), args[0])
for i := 1; i < len(args); i++ {
if err := ImportJSONPreGen(objRegistry, args[i]); err != nil {
return err
}
}
return nil
}

@@ -1,18 +0,0 @@
package main
import (
"context"
"os"
"os/signal"
"syscall"
)
func main() {
ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
if cmd, err := rootCmd.ExecuteContextC(ctx); err != nil {
cmd.PrintErrln("Error:", err.Error())
cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
os.Exit(1)
}
}

@@ -1,33 +0,0 @@
package main
import (
"runtime"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/cmd/xk6-registry/importer"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/version"
"github.com/spf13/cobra"
)
var rootCmd = &cobra.Command{
Use: "xk6-registry",
Version: version.Version,
Short: "Command Line Tool to work with Registry",
Long: `Registry provides tools to work with object registry for xk6.
It contains command for importing objects in registry from preset`,
SilenceErrors: true,
SilenceUsage: true,
Run: rootCmdRun,
}
func init() {
cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
rootCmd.SetVersionTemplate(`FrostFS xk6-registry
{{printf "Version: %s" .Version }}
GoVersion: {{ runtimeVersion }}
`)
rootCmd.AddCommand(importer.Cmd)
}
func rootCmdRun(cmd *cobra.Command, _ []string) {
_ = cmd.Usage()
}

@@ -1,8 +1,7 @@
import local from 'k6/x/frostfs/local';
import datagen from 'k6/x/frostfs/datagen';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const generator = datagen.generator(1024, "random", false);
const payload = open('../go.sum', 'b');
const local_cli = local.connect("/path/to/config.yaml", "/path/to/config/dir", "", false)
export const options = {
@@ -16,7 +15,6 @@ export default function () {
'unique_header': uuidv4()
}
const container_id = '6BVPPXQewRJ6J5EYmAPLczXxNocS7ikyF7amS2esWQnb';
const payload = generator.genPayload()
let resp = local_cli.put(container_id, headers, payload)
if (resp.success) {
local_cli.get(container_id, resp.object_id)

@@ -1,19 +1,19 @@
import native from 'k6/x/frostfs/native';
import datagen from 'k6/x/frostfs/datagen';
import { fail } from "k6";
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const generator = datagen.generator(1024, "random", false);
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false, 0)
const payload = open('../go.sum', 'b');
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false)
export const options = {
stages: [
{ duration: '30s', target: 10 },
{duration: '30s', target: 10},
],
};
export function setup() {
const params = {
acl: 'public-read-write',
placement_policy: 'REP 3',
name: 'container-name',
name_global_scope: 'false'
@@ -24,11 +24,10 @@ export function setup() {
fail(res.error)
}
console.info("created container", res.container_id)
return { container_id: res.container_id }
return {container_id: res.container_id}
}
export default function (data) {
const payload = generator.genPayload()
let headers = {
'unique_header': uuidv4()
}

@@ -1,11 +1,9 @@
import native from 'k6/x/frostfs/native';
import datagen from 'k6/x/frostfs/datagen';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const generator = datagen.generator(1024, "random", false);
const payload = generator.genPayload()
const payload = open('../go.sum', 'b');
const container = "AjSxSNNXbJUDPqqKYm1VbFVDGCakbpUNH8aGjPmGAH3B"
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false)
const frostfs_obj = frostfs_cli.onsite(container, payload)
export const options = {
@@ -16,11 +14,11 @@ export const options = {
export default function () {
let headers = {
'unique_header': uuidv4()
'unique_header': uuidv4()
}
let resp = frostfs_obj.put(headers)
if (resp.success) {
frostfs_cli.get(container, resp.object_id)
frostfs_cli.get(container, resp.object_id)
} else {
console.log(resp.error)
}

@@ -1,9 +1,8 @@
import s3 from 'k6/x/frostfs/s3';
import datagen from 'k6/x/frostfs/datagen';
import { fail } from 'k6'
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const generator = datagen.generator(1024, "random", false);
const payload = open('../go.sum', 'b');
const bucket = "cats"
const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true'})
@@ -17,6 +16,7 @@ export function setup() {
const params = {
acl: 'private',
lock_enabled: 'true',
location_constraint: 'ru'
}
const res = s3_cli.createBucket(bucket, params)
@@ -27,7 +27,6 @@
export default function () {
const key = uuidv4();
const payload = generator.genPayload()
if (s3_cli.put(bucket, key, payload).success) {
s3_cli.get(bucket, key)
}

@@ -1,16 +1,14 @@
import s3local from 'k6/x/frostfs/s3local';
import datagen from 'k6/x/frostfs/datagen';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const bucket = "testbucket"
const generator = datagen.generator(1024, "random", false);
const payload = open('../go.sum', 'b');
const s3local_cli = s3local.connect("path/to/storage/config.yml", "path/to/storage/config/dir", {}, {
'testbucket': 'GBQDDUM1hdodXmiRHV57EUkFWJzuntsG8BG15wFSwam6',
});
export default function () {
const key = uuidv4();
const payload = generator.genPayload()
if (s3local_cli.put(bucket, key, payload).success) {
s3local_cli.get(bucket, key)
}

go.mod

@@ -1,141 +1,140 @@
module git.frostfs.info/TrueCloudLab/xk6-frostfs
go 1.22
go 1.20
require (
git.frostfs.info/TrueCloudLab/frostfs-node v0.44.1-0.20250113100501-6c51f48aab69
git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.32.0
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76
git.frostfs.info/TrueCloudLab/frostfs-node v0.37.1-0.20231213105742-e39db63827d8
git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.27.0-rc.2
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230928142024-84b9d29fc98c
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
github.com/aws/aws-sdk-go-v2 v1.32.8
github.com/aws/aws-sdk-go-v2/config v1.28.9
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.47
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2
github.com/aws/aws-sdk-go-v2 v1.19.0
github.com/aws/aws-sdk-go-v2/config v1.18.28
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.72
github.com/aws/aws-sdk-go-v2/service/s3 v1.37.0
github.com/dop251/goja v0.0.0-20230626124041-ba8a63e79201
github.com/go-loremipsum/loremipsum v1.1.3
github.com/google/uuid v1.6.0
github.com/google/uuid v1.3.0
github.com/joho/godotenv v1.5.1
github.com/nspcc-dev/neo-go v0.106.3
github.com/panjf2000/ants/v2 v2.9.0
github.com/nspcc-dev/neo-go v0.101.5-0.20230808195420-5fc61be5f6c5
github.com/panjf2000/ants/v2 v2.8.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.1
github.com/stretchr/testify v1.9.0
go.etcd.io/bbolt v1.3.10
github.com/spf13/cobra v1.7.0
github.com/stretchr/testify v1.8.4
go.etcd.io/bbolt v1.3.7
go.k6.io/k6 v0.45.1
go.uber.org/zap v1.27.0
golang.org/x/sys v0.28.0
go.uber.org/zap v1.24.0
golang.org/x/sys v0.10.0
)
require (
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 // indirect
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20231121085847-241a9f1ad0a4 // indirect
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 // indirect
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 // indirect
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 // indirect
github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.50 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.5 // indirect
github.com/aws/smithy-go v1.22.1 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/aws/aws-sdk-go v1.44.296 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.27 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.36 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.27 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.30 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.29 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.13 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.19.3 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bluele/gcache v0.0.2 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/dgraph-io/badger/v4 v4.1.0 // indirect
github.com/dgraph-io/ristretto v0.1.1 // indirect
github.com/dlclark/regexp2 v1.10.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-chi/chi/v5 v5.0.8 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-pkgz/expirable-cache/v3 v3.0.0 // indirect
github.com/go-sourcemap/sourcemap v2.1.4-0.20211119122758-180fcef48034+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.1.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/google/flatbuffers v1.12.1 // indirect
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/go-cid v0.4.1 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/minio/sio v0.3.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/minio/highwayhash v1.0.2 // indirect
github.com/minio/sio v0.3.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/mstoykov/atlas v0.0.0-20220811071828-388f114305dd // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.14.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
github.com/nats-io/jwt/v2 v2.4.1 // indirect
github.com/nats-io/nats.go v1.27.1 // indirect
github.com/nats-io/nkeys v0.4.4 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.20.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.19.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/sagikazarmark/locafero v0.6.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.0 // indirect
github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/afero v1.9.5 // indirect
github.com/spf13/cast v1.5.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.19.0 // indirect
github.com/ssgreg/journald v1.0.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/spf13/viper v1.16.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/twmb/murmur3 v1.1.8 // indirect
go.opentelemetry.io/otel v1.31.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0 // indirect
go.opentelemetry.io/otel/trace v1.31.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel v1.16.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 // indirect
go.opentelemetry.io/otel/metric v1.16.0 // indirect
go.opentelemetry.io/otel/sdk v1.16.0 // indirect
go.opentelemetry.io/otel/trace v1.16.0 // indirect
go.opentelemetry.io/proto/otlp v0.20.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/grpc v1.69.2 // indirect
google.golang.org/protobuf v1.36.1 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 // indirect
google.golang.org/grpc v1.56.1 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/guregu/null.v3 v3.5.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
)

go.sum

File diff suppressed because it is too large.

@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
)
var (
@@ -36,25 +37,16 @@ type sizeLimiter struct {
currentSize *atomic.Int64
}
func (*sizeLimiter) SetEvacuationInProgress(shardID string, value bool) {}
func (*sizeLimiter) AddMethodDuration(method string, d time.Duration) {}
func (*sizeLimiter) AddToContainerSize(cnrID string, size int64) {}
func (*sizeLimiter) AddToObjectCounter(shardID string, objectType string, delta int) {}
func (*sizeLimiter) ClearErrorCounter(shardID string) {}
func (*sizeLimiter) DeleteShardMetrics(shardID string) {}
func (*sizeLimiter) GC() engine.GCMetrics { return &noopGCMetrics{} }
func (*sizeLimiter) GC() metrics.GCMetrics { return &noopGCMetrics{} }
func (*sizeLimiter) IncErrorCounter(shardID string) {}
func (*sizeLimiter) SetMode(shardID string, mode mode.Mode) {}
func (*sizeLimiter) SetObjectCounter(shardID string, objectType string, v uint64) {}
func (*sizeLimiter) WriteCache() engine.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
func (*sizeLimiter) DeleteContainerSize(cnrID string) {}
func (*sizeLimiter) DeleteContainerCount(cnrID string) {}
func (*sizeLimiter) SetContainerObjectCounter(_, _, _ string, _ uint64) {}
func (*sizeLimiter) IncContainerObjectCounter(_, _, _ string) {}
func (*sizeLimiter) SubContainerObjectCounter(_, _, _ string, _ uint64) {}
func (*sizeLimiter) IncRefillObjectsCount(_, _ string, _ int, _ bool) {}
func (*sizeLimiter) SetRefillPercent(_, _ string, _ uint32) {}
func (*sizeLimiter) SetRefillStatus(_, _, _ string) {}
func (*sizeLimiter) WriteCache() metrics.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
func (sl *sizeLimiter) AddToPayloadCounter(shardID string, size int64) {
sl.currentSize.Add(size)
@@ -67,27 +59,18 @@ func (sl *sizeLimiter) IsFull() bool {
type noopLimiter struct{}
func (*noopLimiter) SetEvacuationInProgress(shardID string, value bool) {}
func (*noopLimiter) AddMethodDuration(method string, d time.Duration) {}
func (*noopLimiter) AddToContainerSize(cnrID string, size int64) {}
func (*noopLimiter) AddToObjectCounter(shardID string, objectType string, delta int) {}
func (*noopLimiter) AddToPayloadCounter(shardID string, size int64) {}
func (*noopLimiter) ClearErrorCounter(shardID string) {}
func (*noopLimiter) DeleteShardMetrics(shardID string) {}
func (*noopLimiter) GC() engine.GCMetrics { return &noopGCMetrics{} }
func (*noopLimiter) GC() metrics.GCMetrics { return &noopGCMetrics{} }
func (*noopLimiter) IncErrorCounter(shardID string) {}
func (*noopLimiter) SetMode(shardID string, mode mode.Mode) {}
func (*noopLimiter) SetObjectCounter(shardID string, objectType string, v uint64) {}
func (*noopLimiter) WriteCache() engine.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
func (*noopLimiter) WriteCache() metrics.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
func (*noopLimiter) IsFull() bool { return false }
func (*noopLimiter) DeleteContainerSize(cnrID string) {}
func (*noopLimiter) DeleteContainerCount(cnrID string) {}
func (*noopLimiter) SetContainerObjectCounter(_, _, _ string, _ uint64) {}
func (*noopLimiter) IncContainerObjectCounter(_, _, _ string) {}
func (*noopLimiter) SubContainerObjectCounter(_, _, _ string, _ uint64) {}
func (*noopLimiter) IncRefillObjectsCount(_, _ string, _ int, _ bool) {}
func (*noopLimiter) SetRefillPercent(_, _ string, _ uint32) {}
func (*noopLimiter) SetRefillStatus(_, _, _ string) {}
type noopGCMetrics struct{}
@@ -98,9 +81,9 @@ func (*noopGCMetrics) AddRunDuration(shardID string, d time.Duration, success bo
type noopWriteCacheMetrics struct{}
func (*noopWriteCacheMetrics) AddMethodDuration(_, _, _, _ string, _ bool, _ time.Duration) {}
func (*noopWriteCacheMetrics) Close(_, _ string) {}
func (*noopWriteCacheMetrics) IncOperationCounter(_, _, _, _ string, _ engine.NullBool) {}
func (*noopWriteCacheMetrics) SetActualCount(_, _, _ string, count uint64) {}
func (*noopWriteCacheMetrics) SetEstimateSize(_, _, _ string, _ uint64) {}
func (*noopWriteCacheMetrics) SetMode(shardID string, mode string) {}
func (*noopWriteCacheMetrics) AddMethodDuration(string, string, bool, time.Duration, string) {}
func (*noopWriteCacheMetrics) Close(shardID string) {}
func (*noopWriteCacheMetrics) IncOperationCounter(string, string, metrics.NullBool, string) {}
func (*noopWriteCacheMetrics) SetActualCount(shardID string, count uint64, storageType string) {}
func (*noopWriteCacheMetrics) SetEstimateSize(shardID string, size uint64, storageType string) {}
func (*noopWriteCacheMetrics) SetMode(shardID string, mode string) {}
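Interface churn aside, the mechanism both sides keep is small; a standalone sketch of the size-limiting idea (the maxSize field and the main function are illustrative assumptions, the two methods follow the hunks above):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// sizeLimiter sums payload bytes written so far; IsFull trips once the
// sum reaches a configured budget.
type sizeLimiter struct {
	maxSize     int64
	currentSize atomic.Int64
}

func (sl *sizeLimiter) AddToPayloadCounter(shardID string, size int64) {
	sl.currentSize.Add(size)
}

func (sl *sizeLimiter) IsFull() bool {
	return sl.currentSize.Load() >= sl.maxSize
}

func main() {
	sl := &sizeLimiter{maxSize: 1 << 20}
	sl.AddToPayloadCounter("shard-1", 512*1024)
	fmt.Println(sl.IsFull()) // false: only half the budget is used
}
```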

@@ -19,7 +19,8 @@ import (
metabase "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
writecache "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -30,6 +31,7 @@ import (
"go.etcd.io/bbolt"
"go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics"
"go.uber.org/zap"
"golang.org/x/sys/unix"
)
@@ -121,7 +123,7 @@ func (r *RootModule) GetOrCreateEngine(ctx context.Context, configFile string, c
r.configFile = configFile
r.configDir = configDir
appCfg := config.New(configFile, configDir, "")
ngOpts, shardOpts, err := storageEngineOptionsFromConfig(ctx, appCfg, debug, r.l)
ngOpts, shardOpts, err := storageEngineOptionsFromConfig(appCfg, debug, r.l)
if err != nil {
return nil, nil, fmt.Errorf("creating engine options from config: %v", err)
}
@@ -230,31 +232,30 @@ func (epochState) CurrentEpoch() uint64 { return 0 }
// preloaded the storage (if any), by using the same configuration file.
//
// Note that the configuration file only needs to contain the storage-specific sections.
func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug bool, l Limiter) ([]engine.Option, [][]shard.Option, error) {
prm := logger.Prm{}
_ = prm.SetDestination("stdout")
func storageEngineOptionsFromConfig(c *config.Config, debug bool, l Limiter) ([]engine.Option, [][]shard.Option, error) {
log := zap.L()
if debug {
_ = prm.SetLevelString("debug")
}
lg, err := logger.NewLogger(&prm)
if err != nil {
return nil, nil, err
var err error
log, err = zap.NewDevelopment()
if err != nil {
return nil, nil, fmt.Errorf("creating development logger: %v", err)
}
}
ngOpts := []engine.Option{
engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
engine.WithShardPoolSize(engineconfig.ShardPoolSize(c)),
engine.WithLogger(lg),
engine.WithLogger(&logger.Logger{Logger: log}),
engine.WithMetrics(l),
}
var shOpts [][]shard.Option
err = engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
err := engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
opts := []shard.Option{
shard.WithRefillMetabase(sc.RefillMetabase()),
shard.WithMode(sc.Mode()),
shard.WithLogger(lg),
shard.WithLogger(&logger.Logger{Logger: log}),
}
// substorages
@@ -266,14 +267,13 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
cfg := blobovniczaconfig.From((*config.Config)(scfg))
ss := blobstor.SubStorage{
Storage: blobovniczatree.NewBlobovniczaTree(
ctx,
blobovniczatree.WithRootPath(scfg.Path()),
blobovniczatree.WithPermissions(scfg.Perm()),
blobovniczatree.WithBlobovniczaSize(cfg.Size()),
blobovniczatree.WithBlobovniczaShallowDepth(cfg.ShallowDepth()),
blobovniczatree.WithBlobovniczaShallowWidth(cfg.ShallowWidth()),
blobovniczatree.WithOpenedCacheSize(cfg.OpenedCacheSize()),
blobovniczatree.WithLogger(lg),
blobovniczatree.WithLogger(&logger.Logger{Logger: log}),
),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) < sc.SmallSizeLimit()
@@ -302,7 +302,7 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
blobstor.WithCompressObjects(sc.Compress()),
blobstor.WithUncompressableContentTypes(sc.UncompressableContentTypes()),
blobstor.WithStorages(substorages),
blobstor.WithLogger(lg),
blobstor.WithLogger(&logger.Logger{Logger: log}),
))
}
@@ -311,13 +311,19 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
opts = append(opts,
shard.WithWriteCache(true),
shard.WithWriteCacheOptions(
[]writecache.Option{
writecache.WithPath(wc.Path()),
writecache.WithMaxObjectSize(wc.MaxObjectSize()),
writecache.WithFlushWorkersCount(wc.WorkerCount()),
writecache.WithMaxCacheSize(wc.SizeLimit()),
writecache.WithNoSync(wc.NoSync()),
writecache.WithLogger(lg),
writecache.Options{
Type: writecache.TypeBBolt,
BBoltOptions: []writecachebbolt.Option{
writecachebbolt.WithPath(wc.Path()),
writecachebbolt.WithMaxBatchSize(wc.BoltDB().MaxBatchSize()),
writecachebbolt.WithMaxBatchDelay(wc.BoltDB().MaxBatchDelay()),
writecachebbolt.WithMaxObjectSize(wc.MaxObjectSize()),
writecachebbolt.WithSmallObjectSize(wc.SmallObjectSize()),
writecachebbolt.WithFlushWorkersCount(wc.WorkersNumber()),
writecachebbolt.WithMaxCacheSize(wc.SizeLimit()),
writecachebbolt.WithNoSync(wc.NoSync()),
writecachebbolt.WithLogger(&logger.Logger{Logger: log}),
},
},
),
)
@@ -347,7 +353,7 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
Timeout: 1 * time.Second,
}),
metabase.WithEpochState(epochState{}),
metabase.WithLogger(lg),
metabase.WithLogger(&logger.Logger{Logger: log}),
))
}

@@ -52,7 +52,7 @@ func (c *RawClient) Put(ctx context.Context, containerID cid.ID, ownerID *user.I
obj := object.New()
obj.SetContainerID(containerID)
obj.SetOwnerID(*ownerID)
obj.SetOwnerID(ownerID)
obj.SetAttributes(attrs...)
obj.SetPayload(payload)
obj.SetPayloadSize(uint64(sz))
@@ -69,7 +69,7 @@ func (c *RawClient) Put(ctx context.Context, containerID cid.ID, ownerID *user.I
}
var req engine.PutPrm
req.Object = obj
req.WithObject(obj)
start := time.Now()
err = c.ng.Put(ctx, req)

@@ -1,10 +1,6 @@
package logging
import (
"fmt"
"strings"
"time"
"github.com/dop251/goja"
"github.com/sirupsen/logrus"
"go.k6.io/k6/js/modules"
@@ -59,29 +55,14 @@ func (r *RootModule) NewModuleInstance(vu modules.VU) modules.Instance {
return &Logging{vu: vu}
}
tsFormat, disableTs := time.TimeOnly, false
if val, ok := vu.InitEnv().LookupEnv("DATE_FORMAT"); ok {
switch strings.ToLower(val) {
case "timeonly":
case "datetime":
tsFormat = time.DateTime
case "none":
disableTs = true
default:
panic(fmt.Sprintf("invalid value for DATE_FORMAT: %s (should be `timeonly`, `datetime` or `none`)", val))
}
}
format := lg.Formatter
switch f := format.(type) {
case *logrus.TextFormatter:
f.ForceColors = true
f.FullTimestamp = true
f.TimestampFormat = tsFormat
f.DisableTimestamp = disableTs
f.TimestampFormat = "15:04:05"
case *logrus.JSONFormatter:
f.TimestampFormat = tsFormat
f.DisableTimestamp = disableTs
f.TimestampFormat = "15:04:05"
}
return &Logging{vu: vu}
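The master-side DATE_FORMAT handling shown here ultimately just toggles standard logrus formatter fields; a standalone sketch of the same knobs (not part of the module):

```go
package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	// Equivalent of DATE_FORMAT=datetime: full timestamps in DateTime layout.
	// DATE_FORMAT=timeonly keeps time.TimeOnly; DATE_FORMAT=none would set
	// DisableTimestamp: true instead.
	log := logrus.New()
	log.SetFormatter(&logrus.TextFormatter{
		ForceColors:     true,
		FullTimestamp:   true,
		TimestampFormat: time.DateTime,
	})
	log.Info("hello from k6 logging")
}
```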

@@ -1,58 +0,0 @@
package native
import (
"context"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
const networkCacheTTL = time.Minute
var networkInfoCache = &networkInfoCacheT{}
type networkInfoCacheT struct {
guard sync.RWMutex
current *netmap.NetworkInfo
fetchTS time.Time
}
func (c *networkInfoCacheT) getOrFetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
if v := c.get(); v != nil {
return v, nil
}
return c.fetch(ctx, cli)
}
func (c *networkInfoCacheT) get() *netmap.NetworkInfo {
c.guard.RLock()
defer c.guard.RUnlock()
if c.current == nil || time.Since(c.fetchTS) > networkCacheTTL {
return nil
}
return c.current
}
func (c *networkInfoCacheT) fetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
c.guard.Lock()
defer c.guard.Unlock()
if time.Since(c.fetchTS) <= networkCacheTTL {
return c.current, nil
}
res, err := cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
v := res.Info()
c.current = &v
c.fetchTS = time.Now()
return c.current, nil
}
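This cache (master side only) exists so the hot object-put path pays for at most one NetworkInfo RPC per networkCacheTTL across all VUs. A hypothetical call site, assuming it sits inside this package (maxObjectSizeFor is illustrative, not part of the diff):

```go
package native

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)

// maxObjectSizeFor consults the TTL cache instead of issuing
// client.NetworkInfo on every put.
func maxObjectSizeFor(ctx context.Context, cli *client.Client) (uint64, error) {
	ni, err := networkInfoCache.getOrFetch(ctx, cli)
	if err != nil {
		return 0, err
	}
	return ni.MaxObjectSize(), nil
}
```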

@@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -34,7 +35,6 @@ type (
tok session.Object
cli *client.Client
prepareLocally bool
maxObjSize uint64
}
PutResponse struct {
@@ -71,7 +71,6 @@ type (
hdr object.Object
payload []byte
prepareLocally bool
maxObjSize uint64
}
)
@@ -101,10 +100,10 @@ func (c *Client) Put(containerID string, headers map[string]string, payload data
var o object.Object
o.SetContainerID(cliContainerID)
o.SetOwnerID(owner)
o.SetOwnerID(&owner)
o.SetAttributes(attrs...)
resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize, c.maxObjSize)
resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize)
if err != nil {
return PutResponse{Success: false, Error: err.Error()}
}
@@ -241,7 +240,7 @@ func (c *Client) VerifyHash(containerID, objectID, expectedHash string) VerifyHa
}
actualHash := hex.EncodeToString(hasher.Sum(nil))
if actualHash != expectedHash {
return VerifyHashResponse{Success: false, Error: "hash mismatch"}
return VerifyHashResponse{Success: true, Error: "hash mismatch"}
}
return VerifyHashResponse{Success: true}
@@ -264,6 +263,16 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
container.SetCreationTime(&cnr, time.Now())
cnr.SetOwner(usr)
if basicACLStr, ok := params["acl"]; ok {
var basicACL acl.Basic
err := basicACL.DecodeString(basicACLStr)
if err != nil {
return c.putCnrErrorResponse(err)
}
cnr.SetBasicACL(basicACL)
}
placementPolicyStr, ok := params["placement_policy"]
if ok {
var placementPolicy netmap.PlacementPolicy
@@ -345,7 +354,7 @@ func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObj
obj.SetVersion(&apiVersion)
obj.SetType(object.TypeRegular)
obj.SetContainerID(cliContainerID)
obj.SetOwnerID(owner)
obj.SetOwnerID(&owner)
obj.SetPayloadSize(uint64(ln))
obj.SetCreationEpoch(epoch)
@@ -364,7 +373,6 @@ func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObj
hdr: *obj,
payload: data,
prepareLocally: c.prepareLocally,
maxObjSize: c.maxObjSize,
}
}
@@ -390,7 +398,7 @@ func (p PreparedObject) Put(headers map[string]string) PutResponse {
return PutResponse{Success: false, Error: err.Error()}
}
_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0, p.maxObjSize)
_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0)
if err != nil {
return PutResponse{Success: false, Error: err.Error()}
}
@@ -405,7 +413,7 @@ func (s epochSource) CurrentEpoch() uint64 {
}
func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Object,
hdr *object.Object, payload datagen.Payload, chunkSize int, maxObjSize uint64,
hdr *object.Object, payload datagen.Payload, chunkSize int,
) (*client.ResObjectPut, error) {
bufSize := defaultBufferSize
if chunkSize > 0 {
@@ -420,22 +428,19 @@ func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Ob
var prm client.PrmObjectPutInit
if tok != nil {
prm.Session = tok
prm.WithinSession(*tok)
}
if chunkSize > 0 {
prm.MaxChunkLength = chunkSize
prm.SetGRPCPayloadChunkLen(chunkSize)
}
if prepareLocally {
ni, err := networkInfoCache.getOrFetch(vu.Context(), cli)
res, err := cli.NetworkInfo(vu.Context(), client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
prm.MaxSize = ni.MaxObjectSize()
prm.EpochSource = epochSource(ni.CurrentEpoch())
prm.WithoutHomomorphHash = true
if maxObjSize > 0 {
prm.MaxSize = maxObjSize
}
prm.WithObjectMaxSize(res.Info().MaxObjectSize())
prm.WithEpochSource(epochSource(res.Info().CurrentEpoch()))
prm.WithoutHomomorphicHash(true)
}
objectWriter, err := cli.ObjectPutInit(vu.Context(), prm)
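To see the SDK migration in this hunk at a glance, here is a hypothetical helper gathering the master-side, field-style PrmObjectPutInit settings, each annotated with the setter it replaces on the feat/metrics side:

```go
package native

import (
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)

// newStylePutPrm is illustrative only; values come from the caller.
func newStylePutPrm(tok *session.Object, chunkSize int, ni *netmap.NetworkInfo) client.PrmObjectPutInit {
	var prm client.PrmObjectPutInit
	if tok != nil {
		prm.Session = tok // was prm.WithinSession(*tok)
	}
	if chunkSize > 0 {
		prm.MaxChunkLength = chunkSize // was prm.SetGRPCPayloadChunkLen(chunkSize)
	}
	if ni != nil {
		prm.MaxSize = ni.MaxObjectSize()                 // was prm.WithObjectMaxSize(...)
		prm.EpochSource = epochSource(ni.CurrentEpoch()) // was prm.WithEpochSource(...)
		prm.WithoutHomomorphHash = true                  // was prm.WithoutHomomorphicHash(true)
	}
	return prm
}
```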

@@ -52,17 +52,13 @@ func (n *Native) Exports() modules.Exports {
return modules.Exports{Default: n}
}
func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool, maxObjSize int) (*Client, error) {
func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool) (*Client, error) {
var (
cli client.Client
pk *keys.PrivateKey
err error
)
if maxObjSize < 0 {
return nil, fmt.Errorf("max object size value must be positive")
}
pk, err = keys.NewPrivateKey()
if len(hexPrivateKey) != 0 {
pk, err = keys.NewPrivateKeyFromHex(hexPrivateKey)
@@ -72,18 +68,19 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
}
var prmInit client.PrmInit
prmInit.Key = pk.PrivateKey
prmInit.ResolveFrostFSFailures()
prmInit.SetDefaultPrivateKey(pk.PrivateKey)
cli.Init(prmInit)
var prmDial client.PrmDial
prmDial.Endpoint = endpoint
prmDial.SetServerURI(endpoint)
if dialTimeout > 0 {
prmDial.DialTimeout = time.Duration(dialTimeout) * time.Second
prmDial.SetTimeout(time.Duration(dialTimeout) * time.Second)
}
if streamTimeout > 0 {
prmDial.StreamTimeout = time.Duration(streamTimeout) * time.Second
prmDial.SetStreamTimeout(time.Duration(streamTimeout) * time.Second)
}
err = cli.Dial(n.vu.Context(), prmDial)
@@ -118,21 +115,6 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
tok.SetAuthKey(&key)
tok.SetExp(exp)
res, err := cli.NetworkInfo(n.vu.Context(), client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
prevEpoch := res.Info().CurrentEpoch() - 1
tok.SetNbf(prevEpoch)
tok.SetIat(prevEpoch)
if prepareLocally && maxObjSize > 0 {
if uint64(maxObjSize) > res.Info().MaxObjectSize() {
return nil, fmt.Errorf("max object size must be not greater than %d bytes", res.Info().MaxObjectSize())
}
}
// register metrics
objPutSuccess, _ = stats.Registry.NewMetric("frostfs_obj_put_success", metrics.Counter)
@@ -159,6 +141,5 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
tok: tok,
cli: &cli,
prepareLocally: prepareLocally,
maxObjSize: uint64(maxObjSize),
}, nil
}
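The same setter-to-field migration appears in Connect. A hypothetical helper isolating the master-side dial sequence (timeout values are placeholders):

```go
package native

import (
	"context"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func dialNative(ctx context.Context, pk *keys.PrivateKey, endpoint string) (*client.Client, error) {
	var cli client.Client

	var prmInit client.PrmInit
	prmInit.Key = pk.PrivateKey // was prmInit.SetDefaultPrivateKey(pk.PrivateKey)
	cli.Init(prmInit)

	var prmDial client.PrmDial
	prmDial.Endpoint = endpoint // was prmDial.SetServerURI(endpoint)
	prmDial.DialTimeout = 5 * time.Second
	prmDial.StreamTimeout = 15 * time.Second

	if err := cli.Dial(ctx, prmDial); err != nil {
		return nil, err
	}
	return &cli, nil
}
```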

@@ -10,17 +10,14 @@ type ObjExporter struct {
}
type PreGenerateInfo struct {
Buckets []string `json:"buckets"`
Containers []string `json:"containers"`
Objects []ObjInfo `json:"objects"`
ObjSize string `json:"obj_size"`
Buckets []string `json:"buckets"`
Objects []ObjInfo `json:"objects"`
ObjSize string `json:"obj_size"`
}
type ObjInfo struct {
Bucket string `json:"bucket"`
Object string `json:"object"`
CID string `json:"cid"`
OID string `json:"oid"`
}
func NewObjExporter(selector *ObjSelector) *ObjExporter {
@@ -40,7 +37,6 @@ func (o *ObjExporter) ExportJSONPreGen(fileName string) error {
}
bucketMap := make(map[string]struct{})
containerMap := make(map[string]struct{})
count, err := o.selector.Count()
if err != nil {
@@ -54,7 +50,7 @@ func (o *ObjExporter) ExportJSONPreGen(fileName string) error {
break
}
if err = writeObjectInfo(comma, info, f); err != nil {
if _, err = f.WriteString(fmt.Sprintf(`%s{"bucket":"%s","object":"%s"}`, comma, info.S3Bucket, info.S3Key)); err != nil {
return err
}
@@ -62,54 +58,15 @@ func (o *ObjExporter) ExportJSONPreGen(fileName string) error {
comma = ","
}
if info.S3Bucket != "" {
bucketMap[info.S3Bucket] = struct{}{}
}
if info.CID != "" {
containerMap[info.CID] = struct{}{}
}
bucketMap[info.S3Bucket] = struct{}{}
}
if _, err = f.WriteString(`]`); err != nil {
return err
}
if len(bucketMap) > 0 {
if err = writeContainerInfo("buckets", bucketMap, f); err != nil {
return err
}
}
if len(containerMap) > 0 {
if err = writeContainerInfo("containers", containerMap, f); err != nil {
return err
}
}
if _, err = f.WriteString(`}`); err != nil {
return err
}
return nil
}
func writeObjectInfo(comma string, info *ObjectInfo, f *os.File) (err error) {
var res string
if info.S3Bucket != "" || info.S3Key != "" {
res = fmt.Sprintf(`%s{"bucket":"%s","object":"%s"}`, comma, info.S3Bucket, info.S3Key)
} else {
res = fmt.Sprintf(`%s{"cid":"%s","oid":"%s"}`, comma, info.CID, info.OID)
}
_, err = f.WriteString(res)
return err
}
func writeContainerInfo(attrName string, bucketMap map[string]struct{}, f *os.File) (err error) {
if _, err = f.WriteString(fmt.Sprintf(`,"%s":[`, attrName)); err != nil {
if _, err = f.WriteString(`],"buckets":[`); err != nil {
return err
}
i := 0
comma := ""
comma = ""
for bucket := range bucketMap {
if _, err = f.WriteString(fmt.Sprintf(`%s"%s"`, comma, bucket)); err != nil {
return err
@@ -119,6 +76,7 @@ func writeContainerInfo(attrName string, bucketMap map[string]struct{}, f *os.Fi
}
i++
}
_, err = f.WriteString(`]`)
_, err = f.WriteString(`]}`)
return err
}
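Tying the exporter to the selector API, a hypothetical helper using the master-side signatures (as exercised by the deleted test below), plus the rough shape of the file it writes:

```go
package registry

// exportCreated feeds a oneshot selector into the exporter, dumping every
// object still in statusCreated to a pregen-style JSON file.
func exportCreated(objReg *ObjRegistry, path string) error {
	sel := NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated})
	return NewObjExporter(sel).ExportJSONPreGen(path)
}

// On the master side the resulting file looks roughly like:
//
//	{"objects":[{"bucket":"b","object":"k"},{"cid":"...","oid":"..."}],
//	 "buckets":["b"],"containers":["..."]}
```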

@@ -1,156 +0,0 @@
package registry
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"os"
"path/filepath"
"slices"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
type expectedResult struct {
mode string
objects []ObjectInfo
dir string
dbName string
jsonName string
}
func TestObjectExporter(t *testing.T) {
names := []string{"s3", "grpc"}
for _, name := range names {
t.Run(name, runExportTest)
t.Run(name+"-changed", runExportChangedTest)
t.Run(name+"-empty", runExportEmptyTest)
}
}
func runExportTest(t *testing.T) {
expected := getExpectedResult(t)
objReg := getFilledRegistry(t, expected)
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(expected.objects, expected.jsonName))
}
func runExportChangedTest(t *testing.T) {
expected := getExpectedResult(t)
objReg := getFilledRegistry(t, expected)
newStatus := randString(10)
num := randPositiveInt(1, len(expected.objects))
changedObjects := make([]ObjectInfo, num)
require.Equal(t, num, copy(changedObjects[:], expected.objects[:]))
sel := NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated})
for i := range changedObjects {
changedObjects[i].Status = newStatus
require.NoError(t, objReg.SetObjectStatus(sel.NextObject().Id, statusCreated, newStatus))
}
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: newStatus}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(changedObjects, expected.jsonName))
}
func runExportEmptyTest(t *testing.T) {
expected := getExpectedResult(t)
expected.objects = make([]ObjectInfo, 0)
objReg := getFilledRegistry(t, expected)
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(expected.objects, expected.jsonName))
}
func getExpectedResult(t *testing.T) expectedResult {
num := randPositiveInt(2, 100)
mode := getMode(t.Name())
require.NotEqual(t, "", mode, "test mode should contain either \"s3\" or \"grpc\"")
dir := t.TempDir()
res := expectedResult{
mode: mode,
objects: generateObjectInfo(num, t.Name()),
dir: dir,
dbName: filepath.Join(dir, "registry-"+mode+".db"),
jsonName: filepath.Join(dir, "registry-"+mode+".json"),
}
return res
}
func randPositiveInt(min, max int) int {
return rand.Intn(max-min) + min
}
func getMode(name string) (res string) {
if strings.Contains(name, "s3") {
res = filepath.Base(name)
}
if strings.Contains(name, "grpc") {
res = filepath.Base(name)
}
return res
}
func generateObjectInfo(num int, mode string) []ObjectInfo {
res := make([]ObjectInfo, num)
for i := range res {
res[i] = randomObjectInfo()
if !strings.Contains(mode, "s3") {
res[i].S3Bucket = ""
res[i].S3Key = ""
}
if !strings.Contains(mode, "grpc") {
res[i].CID = ""
res[i].OID = ""
}
}
return res
}
func getFilledRegistry(t *testing.T, expected expectedResult) *ObjRegistry {
objReg := NewObjRegistry(context.Background(), expected.dbName)
for i := range expected.objects {
require.NoError(t, objReg.AddObject(expected.objects[i].CID, expected.objects[i].OID, expected.objects[i].S3Bucket, expected.objects[i].S3Key, expected.objects[i].PayloadHash))
}
return objReg
}
func checkExported(expected []ObjectInfo, fileName string) error {
file, err := os.ReadFile(fileName)
if err != nil {
return err
}
if !json.Valid(file) {
return fmt.Errorf("exported json file %s is invalid", fileName)
}
var actual PreGenerateInfo
if json.Unmarshal(file, &actual) != nil {
return err
}
if len(expected) != len(actual.Objects) {
return fmt.Errorf("expected len(): %v, got len(): %v", len(expected), len(actual.Objects))
}
for i := range expected {
if !slices.ContainsFunc(actual.Objects, func(oi ObjInfo) bool {
compareS3 := oi.Bucket == expected[i].S3Bucket && oi.Object == expected[i].S3Key
comparegRPC := oi.CID == expected[i].CID && oi.OID == expected[i].OID
return compareS3 && comparegRPC
}) {
return fmt.Errorf("object %v not found in exported json file %s", expected[i], fileName)
}
}
return nil
}

@@ -101,7 +101,7 @@ func randomObjectInfo() ObjectInfo {
func randString(n int) string {
var sb strings.Builder
for i := 0; i < n; i++ {
sb.WriteRune('a' + rune(rand.Int31())%('z'-'a'+1))
sb.WriteRune('a' + rune(rand.Int())%('z'-'a'+1))
}
return sb.String()
}

@@ -44,7 +44,7 @@ func NewObjRegistry(ctx context.Context, dbFilePath string) *ObjRegistry {
}
func (o *ObjRegistry) AddObject(cid, oid, s3Bucket, s3Key, payloadHash string) error {
return o.boltDB.Batch(func(tx *bbolt.Tx) error {
return o.boltDB.Update(func(tx *bbolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(statusCreated))
if err != nil {
return err
@@ -75,7 +75,7 @@ func (o *ObjRegistry) AddObject(cid, oid, s3Bucket, s3Key, payloadHash string) e
}
func (o *ObjRegistry) SetObjectStatus(id uint64, oldStatus, newStatus string) error {
return o.boltDB.Batch(func(tx *bbolt.Tx) error {
return o.boltDB.Update(func(tx *bbolt.Tx) error {
oldB := tx.Bucket([]byte(oldStatus))
if oldB == nil {
return fmt.Errorf("bucket doesn't exist: '%s'", oldStatus)
@@ -110,7 +110,7 @@ func (o *ObjRegistry) SetObjectStatus(id uint64, oldStatus, newStatus string) er
}
func (o *ObjRegistry) DeleteObject(id uint64) error {
return o.boltDB.Batch(func(tx *bbolt.Tx) error {
return o.boltDB.Update(func(tx *bbolt.Tx) error {
return tx.ForEach(func(_ []byte, b *bbolt.Bucket) error {
return b.Delete(encodeId(id))
})
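The Batch-versus-Update distinction driving this hunk: bbolt's DB.Batch takes the same callback shape as DB.Update, but concurrent callers may be coalesced into one transaction, trading a small delay (bounded by DB.MaxBatchDelay) for fewer commits under k6's many parallel VUs. A standalone sketch (bucket and key names are placeholders; the callback may run more than once, so it must stay idempotent):

```go
package main

import (
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	db, err := bbolt.Open("registry.bolt", 0o644, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Same callback shape as db.Update, but concurrent Batch calls can
	// share a single committed transaction.
	err = db.Batch(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("created"))
		if err != nil {
			return err
		}
		return b.Put([]byte("id"), []byte("object-info"))
	})
	if err != nil {
		log.Fatal(err)
	}
}
```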

@@ -3,15 +3,12 @@ package registry
import (
"context"
"fmt"
"sync"
"time"
"github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt"
)
const nextObjectTimeout = 10 * time.Second
type ObjFilter struct {
Status string
Age int
@@ -23,9 +20,6 @@ type ObjSelector struct {
boltDB *bbolt.DB
filter *ObjFilter
cacheSize int
kind SelectorKind
// Sync synchronizes VU used for deletion.
Sync sync.WaitGroup
}
// objectSelectCache is the default maximum size of a batch to select from DB.
@@ -33,7 +27,7 @@ const objectSelectCache = 1000
// NewObjSelector creates a new instance of object selector that can iterate over
// objects in the specified registry.
func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind, filter *ObjFilter) *ObjSelector {
func NewObjSelector(registry *ObjRegistry, selectionSize int, filter *ObjFilter) *ObjSelector {
if selectionSize <= 0 {
selectionSize = objectSelectCache
}
@@ -46,7 +40,6 @@ func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind,
filter: filter,
objChan: make(chan *ObjectInfo, selectionSize*2),
cacheSize: selectionSize,
kind: kind,
}
go objSelector.selectLoop()
@@ -62,21 +55,12 @@ func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind,
// - underlying registry context is done, nil objects will be returned on the
// currently blocked and every further NextObject calls.
func (o *ObjSelector) NextObject() *ObjectInfo {
if o.kind == SelectorOneshot {
return <-o.objChan
}
select {
case <-time.After(nextObjectTimeout):
return nil
case obj := <-o.objChan:
return obj
}
return <-o.objChan
}
// Count returns total number of objects that match filter of the selector.
func (o *ObjSelector) Count() (int, error) {
count := 0
var count = 0
err := o.boltDB.View(func(tx *bbolt.Tx) error {
b := tx.Bucket([]byte(o.filter.Status))
if b == nil {
@@ -176,20 +160,13 @@ func (o *ObjSelector) selectLoop() {
}
}
if o.kind == SelectorOneshot && len(cache) != o.cacheSize {
return
}
if o.kind != SelectorLooped && len(cache) != o.cacheSize {
if len(cache) != o.cacheSize {
// no more objects, wait a little; the logic could be improved.
select {
case <-time.After(time.Second):
case <-o.ctx.Done():
return
}
}
if o.kind == SelectorLooped && len(cache) != o.cacheSize {
lastID = 0
}

View file

@ -74,35 +74,7 @@ func (r *Registry) open(dbFilePath string) *ObjRegistry {
return registry
}
// SelectorKind represents selector behaviour when no items are available.
type SelectorKind byte
const (
// SelectorAwaiting waits for a new item to arrive.
// This selector visits each item exactly once and can be used when items
// to select are being pushed into the registry concurrently.
SelectorAwaiting = iota
// SelectorLooped rewinds cursor to the start after all items have been read.
// It can encounter duplicates and should be used mostly for read scenarios.
SelectorLooped
// SelectorOneshot visits each item exactly once and exits immediately afterwards.
// It may be used to artificially abort the test after all items were processed.
SelectorOneshot
)
func (r *Registry) GetSelector(dbFilePath string, name string, cacheSize int, filter map[string]string) *ObjSelector {
return r.getSelectorInternal(dbFilePath, name, cacheSize, SelectorAwaiting, filter)
}
func (r *Registry) GetLoopedSelector(dbFilePath string, name string, cacheSize int, filter map[string]string) *ObjSelector {
return r.getSelectorInternal(dbFilePath, name, cacheSize, SelectorLooped, filter)
}
func (r *Registry) GetOneshotSelector(dbFilePath string, name string, cacheSize int, filter map[string]string) *ObjSelector {
return r.getSelectorInternal(dbFilePath, name, cacheSize, SelectorOneshot, filter)
}
func (r *Registry) getSelectorInternal(dbFilePath string, name string, cacheSize int, kind SelectorKind, filter map[string]string) *ObjSelector {
objFilter, err := parseFilter(filter)
if err != nil {
panic(err)
@ -114,7 +86,7 @@ func (r *Registry) getSelectorInternal(dbFilePath string, name string, cacheSize
selector := r.root.selectors[name]
if selector == nil {
registry := r.open(dbFilePath)
selector = NewObjSelector(registry, cacheSize, kind, objFilter)
selector = NewObjSelector(registry, cacheSize, objFilter)
r.root.selectors[name] = selector
} else if !reflect.DeepEqual(selector.filter, objFilter) {
panic(fmt.Sprintf("selector %s already has been created with a different filter", name))

View file

@ -142,70 +142,6 @@ func (c *Client) Get(bucket, key string) GetResponse {
return GetResponse{Success: true}
}
// DeleteObjectVersion deletes object version with specified versionID.
// If the version argument is empty, deletes all versions and delete markers of the specified object.
func (c *Client) DeleteObjectVersion(bucket, key, version string) DeleteResponse {
var toDelete []types.ObjectIdentifier
if version != "" {
toDelete = append(toDelete, types.ObjectIdentifier{
Key: aws.String(key),
VersionId: aws.String(version),
})
} else {
versions, err := c.cli.ListObjectVersions(c.vu.Context(), &s3.ListObjectVersionsInput{
Bucket: aws.String(bucket),
Prefix: aws.String(key),
})
if err != nil {
stats.Report(c.vu, objDeleteFails, 1)
return DeleteResponse{Success: false, Error: err.Error()}
}
toDelete = filterObjectVersions(versions, key)
}
if len(toDelete) == 0 {
return c.Delete(bucket, key)
} else {
_, err := c.cli.DeleteObjects(c.vu.Context(), &s3.DeleteObjectsInput{
Bucket: aws.String(bucket),
Delete: &types.Delete{
Objects: toDelete,
Quiet: aws.Bool(true),
},
})
if err != nil {
stats.Report(c.vu, objDeleteFails, 1)
return DeleteResponse{Success: false, Error: err.Error()}
}
}
return DeleteResponse{Success: true}
}
func filterObjectVersions(versions *s3.ListObjectVersionsOutput, key string) []types.ObjectIdentifier {
var result []types.ObjectIdentifier
for _, v := range versions.Versions {
if *v.Key == key {
result = append(result, types.ObjectIdentifier{
Key: v.Key,
VersionId: v.VersionId,
})
}
}
for _, marker := range versions.DeleteMarkers {
if *marker.Key == key {
result = append(result, types.ObjectIdentifier{
Key: marker.Key,
VersionId: marker.VersionId,
})
}
}
return result
}
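
Note the Prefix semantics at work here: ListObjectVersions returns every key that merely shares the prefix, so filterObjectVersions narrows the listing back to exact-key matches before the batched DeleteObjects call; without it, a look-alike key such as "key2" would be deleted alongside "key".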
func get(
c *s3.Client,
bucket string,
@ -244,7 +180,7 @@ func (c *Client) VerifyHash(bucket, key, expectedHash string) VerifyHashResponse
}
actualHash := hex.EncodeToString(hasher.Sum(nil))
if actualHash != expectedHash {
return VerifyHashResponse{Success: false, Error: "hash mismatch"}
}
return VerifyHashResponse{Success: true}
@ -272,33 +208,13 @@ func (c *Client) CreateBucket(bucket string, params map[string]string) CreateBuc
Bucket: aws.String(bucket),
ACL: types.BucketCannedACL(params["acl"]),
CreateBucketConfiguration: bucketConfiguration,
ObjectLockEnabledForBucket: aws.Bool(lockEnabled),
ObjectLockEnabledForBucket: lockEnabled,
})
if err != nil {
stats.Report(c.vu, createBucketFails, 1)
return CreateBucketResponse{Success: false, Error: err.Error()}
}
var versioning bool
if strVersioned, ok := params["versioning"]; ok {
if versioning, err = strconv.ParseBool(strVersioned); err != nil {
stats.Report(c.vu, createBucketFails, 1)
return CreateBucketResponse{Success: false, Error: err.Error()}
}
}
if versioning {
_, err = c.cli.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
Bucket: aws.String(bucket),
VersioningConfiguration: &types.VersioningConfiguration{
Status: types.BucketVersioningStatusEnabled,
},
})
if err != nil {
stats.Report(c.vu, createBucketFails, 1)
return CreateBucketResponse{Success: false, Error: err.Error()}
}
}
stats.Report(c.vu, createBucketSuccess, 1)
stats.Report(c.vu, createBucketDuration, metrics.D(time.Since(start)))
return CreateBucketResponse{Success: true}
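
Two SDK details surface in this hunk: ObjectLockEnabledForBucket became a *bool in newer aws-sdk-go-v2 s3 releases (hence the aws.Bool wrapper on master), and versioning is not part of CreateBucket at all; it takes a separate PutBucketVersioning call. A minimal standalone sketch of the latter, with an illustrative bucket name:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	cli := s3.NewFromConfig(cfg)

	// Enable versioning after the bucket already exists.
	_, err = cli.PutBucketVersioning(context.Background(), &s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // illustrative
		VersioningConfiguration: &types.VersioningConfiguration{
			Status: types.BucketVersioningStatusEnabled,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}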

View file

@ -53,9 +53,13 @@ func (s *S3) Exports() modules.Exports {
}
func (s *S3) Connect(endpoint string, params map[string]string) (*Client, error) {
cfg, err := config.LoadDefaultConfig(s.vu.Context(),
config.WithBaseEndpoint(endpoint),
config.WithSharedConfigProfile(params["aws_profile"]))
resolver := aws.EndpointResolverWithOptionsFunc(func(_, _ string, _ ...interface{}) (aws.Endpoint, error) {
return aws.Endpoint{
URL: endpoint,
}, nil
})
cfg, err := config.LoadDefaultConfig(s.vu.Context(), config.WithEndpointResolverWithOptions(resolver))
if err != nil {
return nil, fmt.Errorf("configuration error: %w", err)
}
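
Background for this hunk: aws.EndpointResolverWithOptionsFunc (right-hand side) is deprecated in current aws-sdk-go-v2, and config.WithBaseEndpoint together with config.WithSharedConfigProfile is the replacement master uses. A minimal sketch of the newer form, with an illustrative endpoint and profile:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithBaseEndpoint("http://127.0.0.1:8084"), // illustrative gateway
		config.WithSharedConfigProfile("default"),
	)
	if err != nil {
		log.Fatalf("configuration error: %v", err)
	}
	// Path-style addressing is typical for S3-compatible gateways.
	cli := s3.NewFromConfig(cfg, func(o *s3.Options) { o.UsePathStyle = true })
	_ = cli
}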

View file

@ -5,7 +5,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
@ -16,7 +15,7 @@ import (
type Client struct {
vu modules.VU
l *layer.Layer
l layer.Client
ownerID *user.ID
resolver layer.BucketResolver
limiter local.Limiter
@ -43,14 +42,12 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
Error: "engine size limit reached",
}
}
cid, err := c.resolver.Resolve(c.vu.Context(), v2container.SysAttributeZoneDefault, bucket)
cid, err := c.resolver.Resolve(c.vu.Context(), bucket)
if err != nil {
stats.Report(c.vu, objPutFails, 1)
return PutResponse{Error: err.Error()}
}
size := uint64(payload.Size())
prm := &layer.PutObjectParams{
BktInfo: &data.BucketInfo{
Name: bucket,
@ -60,7 +57,7 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
},
Header: map[string]string{},
Object: key,
Size: &size,
Size: int64(payload.Size()),
Reader: payload.Reader(),
}
@ -72,14 +69,14 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start)))
stats.Report(c.vu, objPutSuccess, 1)
stats.ReportDataSent(c.vu, float64(size))
stats.Report(c.vu, objPutData, float64(size))
stats.ReportDataSent(c.vu, float64(prm.Size))
stats.Report(c.vu, objPutData, float64(prm.Size))
return PutResponse{Success: true}
}
func (c *Client) Get(bucket, key string) GetResponse {
cid, err := c.resolver.Resolve(c.vu.Context(), v2container.SysAttributeZoneDefault, bucket)
cid, err := c.resolver.Resolve(c.vu.Context(), bucket)
if err != nil {
stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()}
@ -111,14 +108,9 @@ func (c *Client) Get(bucket, key string) GetResponse {
Start: 0,
End: uint64(extInfo.ObjectInfo.Size),
},
Writer: wr,
}
objPayload, err := c.l.GetObject(c.vu.Context(), getPrm)
if err != nil {
stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()}
}
err = objPayload.StreamTo(wr)
if err != nil {
if err := c.l.GetObject(c.vu.Context(), getPrm); err != nil {
stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()}
}

View file

@ -7,14 +7,13 @@ import (
"io"
"time"
layer "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/relations"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
)
@ -31,90 +30,61 @@ func unimplementedMessage(fname string) string {
"something other than filling a cluster (i.e. PUT or GET).", fname)
}
func (*frostfs) CreateContainer(context.Context, layer.PrmContainerCreate) (*layer.ContainerCreateResult, error) {
func (*frostfs) CreateContainer(context.Context, layer.PrmContainerCreate) (cid.ID, error) {
panic(unimplementedMessage("CreateContainer"))
}
func (*frostfs) Container(context.Context, layer.PrmContainer) (*container.Container, error) {
func (*frostfs) Container(context.Context, cid.ID) (*container.Container, error) {
panic(unimplementedMessage("Container"))
}
func (*frostfs) UserContainers(context.Context, layer.PrmUserContainers) ([]cid.ID, error) {
func (*frostfs) UserContainers(context.Context, user.ID) ([]cid.ID, error) {
panic(unimplementedMessage("UserContainers"))
}
func (*frostfs) SetContainerEACL(context.Context, eacl.Table, *session.Container) error {
panic(unimplementedMessage("SetContainerEACL"))
}
func (*frostfs) ContainerEACL(context.Context, cid.ID) (*eacl.Table, error) {
panic(unimplementedMessage("ContainerEACL"))
}
func (*frostfs) DeleteContainer(context.Context, cid.ID, *session.Container) error {
panic(unimplementedMessage("DeleteContainer"))
}
func (f *frostfs) HeadObject(ctx context.Context, prm layer.PrmObjectHead) (*object.Object, error) {
return f.Get(ctx, prm.Container, prm.Object)
}
func (f *frostfs) GetObject(ctx context.Context, prm layer.PrmObjectGet) (*layer.Object, error) {
func (f *frostfs) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*layer.ObjectPart, error) {
obj, err := f.Get(ctx, prm.Container, prm.Object)
if err != nil {
return nil, err
}
return &layer.Object{
Header: *obj,
Payload: io.NopCloser(bytes.NewReader(obj.Payload())),
}, nil
part := &layer.ObjectPart{}
if prm.WithHeader {
part.Head = obj
}
if prm.WithPayload {
part.Payload = io.NopCloser(bytes.NewReader(obj.Payload()))
}
return part, nil
}
func (f *frostfs) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (*layer.CreateObjectResult, error) {
func (f *frostfs) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (oid.ID, error) {
payload, err := io.ReadAll(prm.Payload)
if err != nil {
return nil, fmt.Errorf("reading payload: %v", err)
return oid.ID{}, fmt.Errorf("reading payload: %v", err)
}
hdrs := map[string]string{}
for _, attr := range prm.Attributes {
hdrs[attr[0]] = attr[1]
}
objID, err := f.Put(ctx, prm.Container, nil, hdrs, payload)
if err != nil {
return nil, err
}
return &layer.CreateObjectResult{
ObjectID: objID,
CreationEpoch: 0, // probably we should additionally invoke NetworkInfo
}, nil
return f.Put(ctx, prm.Container, &prm.Creator, hdrs, payload)
}
func (f *frostfs) DeleteObject(context.Context, layer.PrmObjectDelete) error {
panic(unimplementedMessage("DeleteObject"))
}
func (f *frostfs) TimeToEpoch(context.Context, time.Time, time.Time) (uint64, uint64, error) {
func (f *frostfs) TimeToEpoch(ctx context.Context, now time.Time, future time.Time) (uint64, uint64, error) {
panic(unimplementedMessage("TimeToEpoch"))
}
func (f *frostfs) SearchObjects(context.Context, layer.PrmObjectSearch) ([]oid.ID, error) {
panic(unimplementedMessage("SearchObjects"))
}
func (f *frostfs) AddContainerPolicyChain(context.Context, layer.PrmAddContainerPolicyChain) error {
panic(unimplementedMessage("AddContainerPolicyChain"))
}
func (f *frostfs) RangeObject(context.Context, layer.PrmObjectRange) (io.ReadCloser, error) {
panic(unimplementedMessage("RangeObject"))
}
func (f *frostfs) PatchObject(context.Context, layer.PrmObjectPatch) (oid.ID, error) {
panic(unimplementedMessage("PatchObject"))
}
func (f *frostfs) NetworkInfo(context.Context) (netmap.NetworkInfo, error) {
panic(unimplementedMessage("NetworkInfo"))
}
func (f *frostfs) Relations() relations.Relations {
panic(unimplementedMessage("Relations"))
}
func (f *frostfs) NetmapSnapshot(context.Context) (netmap.NetMap, error) {
panic(unimplementedMessage("NetmapSnapshot"))
}
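
Both branches wrap the in-memory payload as an io.ReadCloser via io.NopCloser(bytes.NewReader(...)); a tiny standalone illustration of why that works:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	payload := []byte("hello")
	// NopCloser adds a no-op Close to a plain Reader, satisfying io.ReadCloser
	// without copying the payload.
	var rc io.ReadCloser = io.NopCloser(bytes.NewReader(payload))
	defer rc.Close()

	out, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
}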

View file

@ -1,19 +1,16 @@
package s3local
import (
"context"
"flag"
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics"
"go.uber.org/zap"
@ -150,66 +147,29 @@ func (s *Local) Connect(configFile string, configDir string, params map[string]s
return nil, fmt.Errorf("creating bucket resolver: %v", err)
}
var owner user.ID
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
workerPool, err := ants.NewPool(100)
if err != nil {
return nil, fmt.Errorf("init worker pool: %w", err)
}
anonKey, err := keys.NewPrivateKey()
if err != nil {
return nil, fmt.Errorf("create anon key: %w", err)
}
cfg := &layer.Config{
GateOwner: owner,
Cache: layer.NewCache(layer.DefaultCachesConfigs(zap.L())),
AnonKey: layer.AnonymousKey{Key: anonKey},
Caches: layer.DefaultCachesConfigs(zap.L()),
AnonKey: layer.AnonymousKey{Key: key},
Resolver: resolver,
TreeService: treeSvc,
Features: &FeatureSettings{},
GateKey: key,
WorkerPool: workerPool,
}
l := layer.NewLayer(zap.L(), &frostfs{rc}, cfg)
err = l.Initialize(s.l.VU().Context(), nopEventListener{})
if err != nil {
return nil, fmt.Errorf("initialize: %w", err)
}
return &Client{
vu: s.l.VU(),
l: layer.NewLayer(zap.L(), &frostfs{rc}, cfg),
l: l,
ownerID: rc.OwnerID(),
resolver: resolver,
limiter: limiter,
}, nil
}
type FeatureSettings struct {
}
type nopEventListener struct{}
func (k *FeatureSettings) TombstoneLifetime() uint64 {
return 10
}
func (k *FeatureSettings) TombstoneMembersSize() int {
return 100
}
func (k *FeatureSettings) BufferMaxSizeForPut() uint64 {
return 1024 * 1024
}
func (k *FeatureSettings) ClientCut() bool {
return false
}
func (k *FeatureSettings) MD5Enabled() bool {
return false
}
func (k *FeatureSettings) FormContainerZone(ns string) string {
if ns == "" {
return v2container.SysAttributeZoneDefault
}
return ns + ".ns"
}
func (nopEventListener) Subscribe(context.Context, string, layer.MsgHandler) error { return nil }
func (nopEventListener) Listen(context.Context) {}
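
On the worker pool master introduces above: ants.NewPool returns a pool capped at a fixed number of goroutines, and Submit queues a task onto it. A minimal sketch with an illustrative size and task:

package main

import (
	"fmt"
	"sync"

	"github.com/panjf2000/ants/v2"
)

func main() {
	pool, err := ants.NewPool(4) // at most 4 tasks run concurrently
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		i := i // capture loop variable (pre-Go 1.22 semantics)
		if err := pool.Submit(func() {
			defer wg.Done()
			fmt.Println("task", i)
		}); err != nil {
			wg.Done()
		}
	}
	wg.Wait()
}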

View file

@ -24,9 +24,9 @@ func newFixedBucketResolver(bucketMapping map[string]string) (fixedBucketResolve
return r, nil
}
func (r fixedBucketResolver) Resolve(_ context.Context, zone, bucket string) (cid.ID, error) {
if cnrID, resolved := r[zone+"/"+bucket]; resolved {
return cnrID, nil
func (r fixedBucketResolver) Resolve(_ context.Context, bucket string) (cid.ID, error) {
if cid, resolved := r[bucket]; resolved {
return cid, nil
}
return cid.ID{}, fmt.Errorf("zone %s and bucket %s is not mapped to any container", zone, bucket)
return cid.ID{}, fmt.Errorf("bucket %s is not mapped to any container", bucket)
}

View file

@ -38,15 +38,15 @@ func (kv kv) GetValue() []byte { return kv.v }
type nodeResponse struct {
meta []tree.Meta
nodeID []uint64
parentID []uint64
ts []uint64
nodeID uint64
parentID uint64
ts uint64
}
func (r nodeResponse) GetMeta() []tree.Meta { return r.meta }
func (r nodeResponse) GetNodeID() []uint64 { return r.nodeID }
func (r nodeResponse) GetParentID() []uint64 { return r.parentID }
func (r nodeResponse) GetTimestamp() []uint64 { return r.ts }
func (r nodeResponse) GetMeta() []tree.Meta { return r.meta }
func (r nodeResponse) GetNodeID() uint64 { return r.nodeID }
func (r nodeResponse) GetParentID() uint64 { return r.parentID }
func (r nodeResponse) GetTimestamp() uint64 { return r.ts }
func (s treeServiceEngineWrapper) GetNodes(ctx context.Context, p *tree.GetNodesParams) ([]tree.NodeResponse, error) {
nodeIDs, err := s.ng.TreeGetByPath(ctx, p.BktInfo.CID, p.TreeID, pilorama.AttributeFilename, p.Path, p.LatestOnly)
@ -67,9 +67,9 @@ func (s treeServiceEngineWrapper) GetNodes(ctx context.Context, p *tree.GetNodes
return nil, err
}
resp := nodeResponse{
parentID: []uint64{parentID},
nodeID: []uint64{nodeID},
ts: []uint64{m.Time},
parentID: parentID,
nodeID: nodeID,
ts: m.Time,
}
if p.AllAttrs {
resp.meta = kvToTreeMeta(m.Items)
@ -89,17 +89,9 @@ func (s treeServiceEngineWrapper) GetNodes(ctx context.Context, p *tree.GetNodes
return resps, nil
}
func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) ([]tree.NodeResponse, error) {
var resps []tree.NodeResponse
if len(rootID) != 1 {
return nil, fmt.Errorf("unsupported multiple node ids")
}
if sort {
return nil, fmt.Errorf("unsupported sorted subtree")
}
var traverse func(nodeID uint64, curDepth uint32) error
traverse = func(nodeID uint64, curDepth uint32) error {
m, parentID, err := s.ng.TreeGetMeta(ctx, bktInfo.CID, treeID, nodeID)
@ -108,9 +100,9 @@ func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.
}
resps = append(resps, nodeResponse{
nodeID: []uint64{nodeID},
parentID: []uint64{parentID},
ts: []uint64{m.Time},
nodeID: nodeID,
parentID: parentID,
ts: m.Time,
meta: kvToTreeMeta(m.Items),
})
@ -130,7 +122,7 @@ func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.
return nil
}
if err := traverse(rootID[0], 0); err != nil {
if err := traverse(rootID, 0); err != nil {
return nil, fmt.Errorf("traversing: %v", err)
}
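
The wrapper above recurses with a self-referential closure: the func variable must be declared before it is assigned so the body can call itself. A standalone sketch of the same depth-limited traversal over an illustrative in-memory tree:

package main

import "fmt"

func main() {
	children := map[int][]int{1: {2, 3}, 2: {4}} // illustrative tree
	var visited []int

	var traverse func(node int, depth, maxDepth int)
	traverse = func(node int, depth, maxDepth int) {
		if depth > maxDepth {
			return
		}
		visited = append(visited, node)
		for _, c := range children[node] {
			traverse(c, depth+1, maxDepth)
		}
	}

	traverse(1, 0, 1)
	fmt.Println(visited) // [1 2 3]: node 4 lies beyond maxDepth
}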
@ -199,12 +191,6 @@ func (s treeServiceEngineWrapper) RemoveNode(ctx context.Context, bktInfo *data.
return err
}
func (s treeServiceEngineWrapper) GetSubTreeStream(
ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32,
) (tree.SubTreeStream, error) {
panic(unimplementedMessage("TreeService.GetSubTreeStream"))
}
func mapToKV(m map[string]string) []pilorama.KeyValue {
var kvs []pilorama.KeyValue
for k, v := range m {

View file

@ -1,59 +1,71 @@
import { sleep } from 'k6';
import { SharedArray } from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import native from 'k6/x/frostfs/native';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats';
import { newGenerator } from './libs/datagen.js';
import { parseEnv } from './libs/env-parser.js';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
import { newGenerator } from './libs/datagen.js';
parseEnv();
const obj_list = new SharedArray(
'obj_list',
function () { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray(
'container_list',
function () { return JSON.parse(open(__ENV.PREGEN_JSON)).containers; });
const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random gRPC endpoint for current VU
const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpc_endpoint =
grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
const grpc_client = native.connect(
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
const log = logging.new().withField('endpoint', grpc_endpoint);
const grpc_endpoint = grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
const grpc_client = native.connect(grpc_endpoint, '',
__ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === "true" : false);
const log = logging.new().withField("endpoint", grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
stats.setTags(__ENV.METRIC_TAGS)
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
}
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
obj_to_read_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_read",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: read_age,
}
)
}
const scenarios = {};
@ -62,161 +74,138 @@ const write_vu_count = parseInt(__ENV.WRITERS || '0');
const write_grpc_chunk_size = 1024 * parseInt(__ENV.GRPC_CHUNK_SIZE || '0')
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_exit_on_null = write_vu_count == 0;
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
obj_to_delete_selector =
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
};
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
};
}
const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
if (!obj_to_delete_selector) {
throw new Error(
'Positive DELETE worker number without a proper object selector');
}
if (!obj_to_delete_selector) {
throw new Error('Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
executor: 'constant-vus',
vus: delete_vu_count,
duration: `${duration}s`,
exec: 'obj_delete',
gracefulStop: '5s',
};
scenarios.delete = {
executor: 'constant-vus',
vus: delete_vu_count,
duration: `${duration}s`,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const headers = { unique_header: uuidv4() };
const container =
container_list[Math.floor(Math.random() * container_list.length)];
const headers = {
unique_header: uuidv4()
};
const container = container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload();
const resp =
grpc_client.put(container, headers, payload, write_grpc_chunk_size);
if (!resp.success) {
log.withField('cid', container).error(resp.error);
return;
}
const payload = generator.genPayload();
const resp = grpc_client.put(container, headers, payload, write_grpc_chunk_size);
if (!resp.success) {
log.withField("cid", container).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject(container, resp.object_id, '', '', payload.hash());
}
if (obj_registry) {
obj_registry.addObject(container, resp.object_id, "", "", payload.hash());
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
const resp = grpc_client.get(obj.c_id, obj.o_id)
if(obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = grpc_client.get(obj.c_id, obj.o_id)
if (!resp.success) {
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({ cid: obj.container, oid: obj.object }).error(resp.error);
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
if (obj_to_delete_exit_on_null) {
exec.test.abort("No more objects to select");
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
return;
}
const resp = grpc_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
return;
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
obj_registry.deleteObject(obj.id);
const resp = grpc_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}

View file

@ -1,69 +1,71 @@
import { sleep } from 'k6';
import { SharedArray } from 'k6/data';
import logging from 'k6/x/frostfs/logging';
import native from 'k6/x/frostfs/native';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats';
import { newGenerator } from './libs/datagen.js';
import { parseEnv } from './libs/env-parser.js';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
import { newGenerator } from './libs/datagen.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random gRPC endpoint for current VU
const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpc_endpoint =
grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
const grpc_client = native.connect(
grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
const log = logging.new().withField('endpoint', grpc_endpoint);
const grpc_endpoint = grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
const grpc_client = native.connect(grpc_endpoint, '',
__ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === "true" : false);
const log = logging.new().withField("endpoint", grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
stats.setTags(__ENV.METRIC_TAGS)
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
}
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
obj_to_read_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_read",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: read_age,
}
)
}
const scenarios = {};
@ -75,164 +77,160 @@ const write_rate = parseInt(__ENV.WRITE_RATE || '0');
const write_grpc_chunk_size = 1024 * parseInt(__ENV.GRPC_CHUNK_SIZE || '0')
const generator = newGenerator(write_rate > 0);
if (write_rate > 0) {
scenarios.write = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_write_vus,
rate: write_rate,
timeUnit: time_unit,
exec: 'obj_write',
gracefulStop: '5s',
};
scenarios.write = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_write_vus,
rate: write_rate,
timeUnit: time_unit,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const pre_alloc_read_vus = parseInt(__ENV.PRE_ALLOC_READERS || '0');
const max_read_vus = parseInt(__ENV.MAX_READERS || pre_alloc_read_vus);
const read_rate = parseInt(__ENV.READ_RATE || '0');
if (read_rate > 0) {
scenarios.read = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_read_vus,
rate: read_rate,
timeUnit: time_unit,
exec: 'obj_read',
gracefulStop: '5s',
};
scenarios.read = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_read_vus,
rate: read_rate,
timeUnit: time_unit,
exec: 'obj_read',
gracefulStop: '5s',
};
}
const pre_alloc_delete_vus = parseInt(__ENV.PRE_ALLOC_DELETERS || '0');
const max_delete_vus = parseInt(__ENV.MAX_DELETERS || pre_alloc_write_vus);
const delete_rate = parseInt(__ENV.DELETE_RATE || '0');
if (delete_rate > 0) {
if (!obj_to_delete_selector) {
throw new Error(
'Positive DELETE worker number without a proper object selector');
}
if (!obj_to_delete_selector) {
throw new Error('Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_delete_vus,
maxVUs: max_delete_vus,
rate: delete_rate,
timeUnit: time_unit,
exec: 'obj_delete',
gracefulStop: '5s',
};
scenarios.delete = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_delete_vus,
maxVUs: max_delete_vus,
rate: delete_rate,
timeUnit: time_unit,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_pre_allocated_vu_count =
pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus
const total_pre_allocated_vu_count = pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Pre allocated reading VUs: ${pre_alloc_read_vus}`);
console.log(`Pre allocated writing VUs: ${pre_alloc_write_vus}`);
console.log(`Pre allocated deleting VUs: ${pre_alloc_delete_vus}`);
console.log(`Total pre allocated VUs: ${total_pre_allocated_vu_count}`);
console.log(`Max reading VUs: ${max_read_vus}`);
console.log(`Max writing VUs: ${max_write_vus}`);
console.log(`Max deleting VUs: ${max_delete_vus}`);
console.log(`Total max VUs: ${total_max_vu_count}`);
console.log(`Time unit: ${time_unit}`);
console.log(`Read rate: ${read_rate}`);
console.log(`Writing rate: ${write_rate}`);
console.log(`Delete rate: ${delete_rate}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Pre allocated reading VUs: ${pre_alloc_read_vus}`);
console.log(`Pre allocated writing VUs: ${pre_alloc_write_vus}`);
console.log(`Pre allocated deleting VUs: ${pre_alloc_delete_vus}`);
console.log(`Total pre allocated VUs: ${total_pre_allocated_vu_count}`);
console.log(`Max reading VUs: ${max_read_vus}`);
console.log(`Max writing VUs: ${max_write_vus}`);
console.log(`Max deleting VUs: ${max_delete_vus}`);
console.log(`Total max VUs: ${total_max_vu_count}`);
console.log(`Time unit: ${time_unit}`);
console.log(`Read rate: ${read_rate}`);
console.log(`Writing rate: ${write_rate}`);
console.log(`Delete rate: ${delete_rate}`);
const start_timestamp = Date.now()
console.log(`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const headers = { unique_header: uuidv4() };
const container =
container_list[Math.floor(Math.random() * container_list.length)];
const headers = {
unique_header: uuidv4()
};
const container = container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload();
const resp =
grpc_client.put(container, headers, payload, write_grpc_chunk_size);
if (!resp.success) {
log.withField('cid', container).error(resp.error);
return;
}
const payload = generator.genPayload();
const resp = grpc_client.put(container, headers, payload, write_grpc_chunk_size);
if (!resp.success) {
log.withField("cid", container).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject(container, resp.object_id, '', '', payload.hash());
}
if (obj_registry) {
obj_registry.addObject(container, resp.object_id, "", "", payload.hash());
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
const resp = grpc_client.get(obj.c_id, obj.o_id)
if(obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = grpc_client.get(obj.c_id, obj.o_id)
if (!resp.success) {
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({ cid: obj.container, oid: obj.object }).error(resp.error);
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const resp = grpc_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
return;
}
const resp = grpc_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
obj_registry.deleteObject(obj.id);
}

View file

@ -1,42 +1,39 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import http from 'k6/http';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
import http from 'k6/http';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
import { newGenerator } from './libs/datagen.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray('container_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random HTTP endpoint for current VU
const http_endpoints = __ENV.HTTP_ENDPOINTS.split(',');
const http_endpoint =
http_endpoints[Math.floor(Math.random() * http_endpoints.length)];
const log = logging.new().withField('endpoint', http_endpoint);
const http_endpoint = http_endpoints[Math.floor(Math.random() * http_endpoints.length)];
const log = logging.new().withField("endpoint", http_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
stats.setTags(__ENV.METRIC_TAGS)
}
const scenarios = {};
@ -44,100 +41,94 @@ const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
}
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
}
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
}
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
}
}
export const options = {
scenarios,
setupTimeout: '5s',
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count;
const total_vu_count = write_vu_count + read_vu_count;
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
const start_timestamp = Date.now()
console.log(`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
[summary_json]: JSON.stringify(data),
};
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const container =
container_list[Math.floor(Math.random() * container_list.length)];
const container = container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload();
const data = {
field: uuidv4(),
// Because we use `file` wrapping and it is not straightforward to use
// streams here,
// `-e STREAMING=1` has no effect for this scenario.
file: http.file(payload.bytes(), 'random.data'),
};
const payload = generator.genPayload();
const data = {
field: uuidv4(),
// Because we use `file` wrapping and it is not straightforward to use streams here,
// `-e STREAMING=1` has no effect for this scenario.
file: http.file(payload.bytes(), "random.data"),
};
const resp = http.post(`http://${http_endpoint}/upload/${container}`, data);
if (resp.status != 200) {
log.withFields({status: resp.status, cid: container}).error(resp.error);
return;
}
const object_id = JSON.parse(resp.body).object_id;
if (obj_registry) {
obj_registry.addObject(container, object_id, '', '', payload.hash());
}
const resp = http.post(`http://${http_endpoint}/upload/${container}`, data);
if (resp.status != 200) {
log.withFields({status: resp.status, cid: container}).error(resp.error);
return;
}
const object_id = JSON.parse(resp.body).object_id;
if (obj_registry) {
obj_registry.addObject(container, object_id, "", "", payload.hash());
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp =
http.get(`http://${http_endpoint}/get/${obj.container}/${obj.object}`);
if (resp.status != 200) {
log.withFields({status: resp.status, cid: obj.container, oid: obj.object})
.error(resp.error);
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = http.get(`http://${http_endpoint}/get/${obj.container}/${obj.object}`);
if (resp.status != 200) {
log.withFields({status: resp.status, cid: obj.container, oid: obj.object}).error(resp.error);
}
}

View file

@ -1,2 +1,2 @@
(()=>{"use strict";var t={n:r=>{var e=r&&r.__esModule?()=>r.default:()=>r;return t.d(e,{a:e}),e},d:(r,e)=>{for(var n in e)t.o(e,n)&&!t.o(r,n)&&Object.defineProperty(r,n,{enumerable:!0,get:e[n]})},o:(t,r)=>Object.prototype.hasOwnProperty.call(t,r),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},r={};t.r(r),t.d(r,{findBetween:()=>x,getCurrentStageIndex:()=>i,normalDistributionStages:()=>m,parseDuration:()=>o,randomIntBetween:()=>d,randomItem:()=>h,randomString:()=>p,tagWithCurrentStageIndex:()=>u,tagWithCurrentStageProfile:()=>s,uuidv4:()=>g});const e=require("k6/execution");var n=t.n(e);function o(t){if(null==t||t.length<1)throw new Error("str is empty");for(var r=0,e="",n={},o=0;o<t.length;o++)if((a(t[o])||"."==t[o])&&(e+=t[o]),null!=t[o+1]&&!a(t[o+1])&&"."!=t[o+1]){var i=parseFloat(e,10),u=t[o+1];switch(u){case"d":r+=24*i*60*60*1e3;break;case"h":r+=60*i*60*1e3;break;case"m":o+2<t.length&&"s"==t[o+2]?(r+=Math.trunc(i),o++,u="ms"):r+=60*i*1e3;break;case"s":r+=1e3*i;break;default:throw new Error("".concat(u," is an unsupported time unit"))}if(n[u])throw new Error("".concat(u," time unit is provided multiple times"));n[u]=!0,o++,e=""}return e.length>0&&(r+=parseFloat(e,10)),r}function a(t){return t>="0"&&t<="9"}function i(){if(null==n()||null==n().test||null==n().test.options)throw new Error("k6/execution.test.options is undefined - getCurrentStageIndex requires a k6 v0.38.0 or later. Please, upgrade for getting k6/execution.test.options supported.");var t=n().test.options.scenarios[n().scenario.name];if(null==t)throw new Error("the exec.test.options object doesn't contain the current scenario ".concat(n().scenario.name));if(null==t.stages)throw new Error("only ramping-vus or ramping-arravial-rate supports stages, it is not possible to get a stage index on other executors.");if(t.stages.length<1)throw new Error("the current scenario ".concat(t.name," doesn't contain any stage"));for(var r=0,e=new Date-n().scenario.startTime,a=0;a<t.stages.length;a++)if(e<(r+=o(t.stages[a].duration)))return a;return t.stages.length-1}function u(){n().vu.tags.stage=i()}function s(){n().vu.tags.stage_profile=function(){var t=i();if(t<1)return"ramp-up";var r=n().test.options.scenarios[n().scenario.name].stages,e=r[t],o=r[t-1];return e.target>o.target?"ramp-up":o.target==e.target?"steady":"ramp-down"}()}const l=require("k6/crypto");function c(t){return function(t){if(Array.isArray(t))return f(t)}(t)||function(t){if("undefined"!=typeof Symbol&&null!=t[Symbol.iterator]||null!=t["@@iterator"])return Array.from(t)}(t)||function(t,r){if(!t)return;if("string"==typeof t)return f(t,r);var e=Object.prototype.toString.call(t).slice(8,-1);"Object"===e&&t.constructor&&(e=t.constructor.name);if("Map"===e||"Set"===e)return Array.from(t);if("Arguments"===e||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(e))return f(t,r)}(t)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function f(t,r){(null==r||r>t.length)&&(r=t.length);for(var e=0,n=new Array(r);e<r;e++)n[e]=t[e];return n}function g(){var t=arguments.length>0&&void 0!==arguments[0]&&arguments[0];return t?y():v()}function d(t,r){return Math.floor(Math.random()*(r-t+1)+t)}function h(t){return t[Math.floor(Math.random()*t.length)]}function p(t){for(var r=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:"abcdefghijklmnopqrstuvwxyz",e="";t--;)e+=r[Math.random()*r.length|0];return e}function x(t,r,e){for(var n,o=arguments.length>3&&void 0!==arguments[3]&&arguments[3],a=[],i=!0,u=0;i&&-1!=(n=t.indexOf(r))&&(n+=r.length,-1!=(u=t.indexOf(e,n)));){var s=t.substring(n,u);if(!o)return s;a.push(s),t=t.substring(u+e.length)}return a.length?a:null}function m(t,r){var e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:10;function n(t,r,e){return Math.exp(-.5*Math.pow((e-t)/r,2))/(r*Math.sqrt(2*Math.PI))}for(var o=0,a=1,i=new Array(e+2).fill(0),u=new Array(e+2).fill(Math.ceil(r/6)),s=[],l=0;l<=e;l++)i[l]=n(o,a,-2*a+4*a*l/e);for(var f=Math.max.apply(Math,c(i)),g=i.map((function(r){return Math.round(r*t/f)})),d=1;d<=e;d++)u[d]=Math.ceil(4*r/(6*e));for(var h=0;h<=e+1;h++)s.push({duration:"".concat(u[h],"s"),target:g[h]});return s}function v(){return"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g,(function(t){var r=16*Math.random()|0;return("x"===t?r:3&r|8).toString(16)}))}function y(){for(var t=[],r=0;r<256;++r)t.push((r+256).toString(16).slice(1));var e=new Uint8Array((0,l.randomBytes)(16));return e[6]=15&e[6]|64,e[8]=63&e[8]|128,(t[e[0]]+t[e[1]]+t[e[2]]+t[e[3]]+"-"+t[e[4]]+t[e[5]]+"-"+t[e[6]]+t[e[7]]+"-"+t[e[8]]+t[e[9]]+"-"+t[e[10]]+t[e[11]]+t[e[12]]+t[e[13]]+t[e[14]]+t[e[15]]).toLowerCase()}var w=exports;for(var b in r)w[b]=r[b];r.__esModule&&Object.defineProperty(w,"__esModule",{value:!0})})();
//# sourceMappingURL=index.js.map

View file

@ -1,34 +0,0 @@
import { uuidv4 } from './k6-utils-1.4.0.js';
export function generateS3Key() {
let width = parseInt(__ENV.DIR_WIDTH || '0');
let height = parseInt(__ENV.DIR_HEIGHT || '0');
let key = ''
if (width > 0 && height > 0) {
for (let index = 0; index < height; index++) {
const w = Math.floor(Math.random() * width) + 1;
key = key + 'dir' + w + '/';
}
}
key += objName();
return key;
}
const asciiLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
function objName() {
if (__ENV.OBJ_NAME) {
return __ENV.OBJ_NAME;
}
const length = parseInt(__ENV.OBJ_NAME_LENGTH || '0');
if (length > 0) {
let name = "";
for (let i = 0; i < length; i++) {
name += asciiLetters.charAt(Math.floor(Math.random() * asciiLetters.length));
}
return name;
}
return uuidv4();
}

View file

@ -1,57 +1,55 @@
import {SharedArray} from 'k6/data';
import exec from 'k6/execution';
import local from 'k6/x/frostfs/local';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import stats from 'k6/x/frostfs/stats';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
import { SharedArray } from 'k6/data';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
import exec from 'k6/execution';
import { newGenerator } from './libs/datagen.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray('container_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
const config_file = __ENV.CONFIG_FILE;
const config_dir = __ENV.CONFIG_DIR;
const debug_logger = (__ENV.DEBUG_LOGGER || 'false') == 'true';
const max_total_size_gb =
__ENV.MAX_TOTAL_SIZE_GB ? parseInt(__ENV.MAX_TOTAL_SIZE_GB) : 0;
const local_client =
local.connect(config_file, config_dir, '', debug_logger, max_total_size_gb);
const log = logging.new().withFields(
{'config_file': config_file, 'config_dir': config_dir});
const max_total_size_gb = __ENV.MAX_TOTAL_SIZE_GB ? parseInt(__ENV.MAX_TOTAL_SIZE_GB) : 0;
const local_client = local.connect(config_file, config_dir, '', debug_logger, max_total_size_gb);
const log = logging.new().withFields({"config_file": config_file,"config_dir": config_dir});
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
stats.setTags(__ENV.METRIC_TAGS)
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
}
const scenarios = {};
@ -59,119 +57,117 @@ const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
};
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
};
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
};
}
const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
if (!obj_to_delete_selector) {
throw new Error(
'Positive DELETE worker number without a proper object selector');
}
if (!obj_to_delete_selector) {
throw new Error('Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
executor: 'constant-vus',
vus: delete_vu_count,
duration: `${duration}s`,
exec: 'obj_delete',
gracefulStop: '5s',
};
scenarios.delete = {
executor: 'constant-vus',
vus: delete_vu_count,
duration: `${duration}s`,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
const start_timestamp = Date.now()
console.log(`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
[summary_json]: JSON.stringify(data),
};
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
const headers = {unique_header: uuidv4()};
const container =
container_list[Math.floor(Math.random() * container_list.length)];
const headers = {
unique_header: uuidv4()
};
const container = container_list[Math.floor(Math.random() * container_list.length)];
const payload = generator.genPayload();
const resp = local_client.put(container, headers, payload);
if (!resp.success) {
if (resp.abort) {
exec.test.abort(resp.error);
const payload = generator.genPayload();
const resp = local_client.put(container, headers, payload);
if (!resp.success) {
if (resp.abort) {
exec.test.abort(resp.error);
}
log.withField("cid", container).error(resp.error);
return;
}
log.withField('cid', container).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject(container, resp.object_id, '', '', payload.hash());
}
if (obj_registry) {
obj_registry.addObject(container, resp.object_id, "", "", payload.hash());
}
}
export function obj_read() {
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = local_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = local_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
}
}
export function obj_delete() {
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const resp = local_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
return;
}
const resp = local_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
obj_registry.deleteObject(obj.id);
}

View file

@ -3,19 +3,15 @@ import uuid
from helpers.cmd import execute_cmd, log
def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
configuration = ""
def create_bucket(endpoint, versioning, location, no_verify_ssl):
if location:
configuration = f"--create-bucket-configuration 'LocationConstraint={location}'"
if acl:
acl = f"--acl {acl}"
location = f"--create-bucket-configuration 'LocationConstraint={location}'"
bucket_name = str(uuid.uuid4())
no_verify_ssl_str = "--no-verify-ssl" if no_verify_ssl else ""
cmd_line = f"aws {no_verify_ssl_str} s3api create-bucket --bucket {bucket_name} " \
f"--endpoint {endpoint} {configuration} {acl} "
f"--endpoint {endpoint} {location}"
cmd_line_ver = f"aws {no_verify_ssl_str} s3api put-bucket-versioning --bucket {bucket_name} " \
f"--versioning-configuration Status=Enabled --endpoint {endpoint}"
f"--versioning-configuration Status=Enabled --endpoint {endpoint} "
output, success = execute_cmd(cmd_line)
@ -25,7 +21,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
f"Error: {output}", endpoint)
return False
if versioning:
if versioning == "True":
output, success = execute_cmd(cmd_line_ver)
if not success:
log(f"{cmd_line_ver}\n"
@ -34,7 +30,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
else:
log(f"Bucket versioning has been applied for bucket {bucket_name}", endpoint)
log(f"Created bucket: {bucket_name} ({location})", endpoint)
log(f"Created bucket: {bucket_name}", endpoint)
return bucket_name
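# Usage sketch (hypothetical endpoint and location; note that versioning is
# compared against the literal string "True"):
#   name = create_bucket("https://s3.example.local:8084", "True", "load-1-4", True)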

View file

@ -1,16 +1,16 @@
import re
from helpers.cmd import execute_cmd, log
def create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local=False, retry=0):
if retry > int(container_creation_retry):
def create_container(endpoint, policy, wallet_path, config, local=False, depth=0):
if depth > 20:
raise ValueError(f"unable to create container: too many unsuccessful attempts")
# Default to empty flags so the command line stays valid without a wallet/config.
wallet_file = ""
wallet_config = ""
if wallet_path:
wallet_file = f"--wallet {wallet_path}"
wallet_file = "--wallet " + wallet_path
if config:
wallet_config = f"--config {config}"
wallet_config = "--config " + config
cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} container create {wallet_file} {wallet_config} " \
f" --policy '{policy}' --await"
f" --policy '{policy}' --basic-acl public-read-write --await"
output, success = execute_cmd(cmd_line)
@ -32,21 +32,7 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, co
raise ValueError(f"no CID was parsed from command output:\t{fst_str}")
cid = splitted[1]
log(f"Created container: {cid} ({policy})", endpoint)
# Add rule for container
if rules:
r = ""
for rule in rules:
r += f" --rule '{rule}' "
cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} ape-manager add {wallet_file} {wallet_config} " \
f" --chain-id 'chain-id' {r} --target-name '{cid}' --target-type 'container'"
output, success = execute_cmd(cmd_line)
if not success:
log(f"{cmd_line}\n"
f"Rule has not been added\n"
f"{output}", endpoint)
return False
log(f"Created container {cid}", endpoint)
if not local:
return cid
@ -100,7 +86,7 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, co
return cid
log(f"Created container {cid} is not stored on {endpoint}, creating another one...", endpoint)
return create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local, retry + 1)
return create_container(endpoint, policy, wallet_path, config, local, depth + 1)
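# Usage sketch (placeholder endpoint and paths; the depth counter limits
# recursion to 20 attempts):
#   cid = create_container("127.0.0.1:8080", "REP 2 IN X CBF 2 SELECT 2 FROM * AS X",
#                          "/path/to/wallet.json", "/path/to/wallet-config.yml")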
def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config):

View file

@ -15,21 +15,18 @@ from helpers.frostfs_cli import create_container, upload_object
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
MAX_WORKERS = 50
DEFAULT_POLICY = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
DEFAULT_RULES = ["allow Object.* *"]
parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Size of uploaded objects in KB')
parser.add_argument('--containers', help='Number of containers to create')
parser.add_argument('--retry', default=20, help='Maximum number of retries to create a container')
parser.add_argument('--out', help='JSON file with output')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects')
parser.add_argument('--wallet', help='Wallet file path')
parser.add_argument('--config', help='Wallet config file path')
parser.add_argument(
"--policy",
help=f"Container placement policy. Default is {DEFAULT_POLICY}",
action="append"
help="Container placement policy",
default="REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
)
parser.add_argument('--endpoint', help='Node addresses separated by commas.')
parser.add_argument('--update', help='Save existing containers')
@ -38,10 +35,6 @@ parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Def
parser.add_argument('--sleep', help='Time to sleep between container creation and object upload (in seconds), '
'Default = 8', default=8)
parser.add_argument('--local', help='Create containers that store data on provided endpoints. Warning: additional empty containers may be created.', action='store_true')
parser.add_argument(
'--rule',
help='Rule attached to created containers. All entries of CONTAINER_ID will be replaced with the ID of the created container.',
action="append")
args: Namespace = parser.parse_args()
print(args)
@ -52,18 +45,12 @@ def main():
objects_list = []
endpoints = args.endpoint.split(',')
if not args.policy:
args.policy = [DEFAULT_POLICY]
container_creation_retry = args.retry
wallet = args.wallet
wallet_config = args.config
workers = int(args.workers)
objects_per_container = int(args.preload_obj)
rules = args.rule
if not rules:
rules = DEFAULT_RULES
ignore_errors = args.ignore_errors
if args.update:
# Open file
@ -75,9 +62,9 @@ def main():
containers_count = int(args.containers)
print(f"Create containers: {containers_count}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, rules, args.local)
for _, endpoint, policy in
zip(range(containers_count), cycle(endpoints), cycle(args.policy))]
containers_runs = [executor.submit(create_container, endpoint, args.policy, wallet, wallet_config, args.local)
for _, endpoint in
zip(range(containers_count), cycle(endpoints))]
for run in containers_runs:
container_id = run.result()
@ -87,7 +74,7 @@ def main():
print("Create containers: Completed")
print(f" > Containers: {containers}")
if containers_count > 0 and len(containers) != containers_count:
if containers_count == 0 or len(containers) != containers_count:
print(f"Containers mismatch in preset: expected {containers_count}, created {len(containers)}")
if not ignore_errors:
sys.exit(ERROR_WRONG_CONTAINERS_COUNT)

View file

@ -11,12 +11,6 @@ from concurrent.futures import ProcessPoolExecutor
from helpers.cmd import random_payload
from helpers.aws_cli import create_bucket, upload_object
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
ERROR_WRONG_PERCENTAGE = 3
MAX_WORKERS = 50
DEFAULT_LOCATION = ""
parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Size of uploaded objects in KB.')
@ -26,19 +20,21 @@ parser.add_argument('--preload_obj', help='Number of pre-loaded objects.')
parser.add_argument('--endpoint', help='S3 gateway addresses separated by commas.')
parser.add_argument('--update', help='True/False, False by default. Save existing buckets from target file (--out). '
'New buckets will not be created.')
parser.add_argument('--location', help=f'AWS location constraint. Default is "{DEFAULT_LOCATION}"', action="append")
parser.add_argument('--versioning', help='True/False, False by default. Alias of --buckets_versioned=100')
parser.add_argument('--buckets_versioned', help='Percent of versioned buckets. Default is 0', default=0)
parser.add_argument('--location', help='AWS location constraint. Empty unless explicitly set.', default="")
parser.add_argument('--versioning', help='True/False, False by default.')
parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
parser.add_argument('--no-verify-ssl', help='Ignore SSL verification', action='store_true')
parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
parser.add_argument('--sleep', help='Time to sleep between bucket creation and object upload (in seconds), '
'Default = 8', default=8)
parser.add_argument('--acl', help='Bucket ACL. Default is private. Expected values are: private, public-read or public-read-write.', default="private")
args = parser.parse_args()
print(args)
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
MAX_WORKERS = 50
def main():
buckets = []
objects_list = []
@ -46,8 +42,6 @@ def main():
no_verify_ssl = args.no_verify_ssl
endpoints = args.endpoint.split(',')
if not args.location:
args.location = [DEFAULT_LOCATION]
workers = int(args.workers)
objects_per_bucket = int(args.preload_obj)
@ -64,18 +58,9 @@ def main():
print(f"Create buckets: {buckets_count}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
if not 0 <= int(args.buckets_versioned) <= 100:
print(f"Percent of versioned buckets must be between 0 and 100: got {args.buckets_versioned}")
if not ignore_errors:
sys.exit(ERROR_WRONG_PERCENTAGE)
if args.versioning == "True":
versioning_per_bucket = [True] * buckets_count
else:
num_versioned_buckets = int((int(args.buckets_versioned) / 100) * buckets_count)
versioning_per_bucket = [True] * num_versioned_buckets + [False] * (buckets_count - num_versioned_buckets)
buckets_runs = [executor.submit(create_bucket, endpoint, versioning_per_bucket[i], location, args.acl, no_verify_ssl)
for i, endpoint, location in
zip(range(buckets_count), cycle(endpoints), cycle(args.location))]
buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning, args.location, no_verify_ssl)
for _, endpoint in
zip(range(buckets_count), cycle(endpoints))]
for run in buckets_runs:
bucket_name = run.result()
@ -85,7 +70,7 @@ def main():
print("Create buckets: Completed")
print(f" > Buckets: {buckets}")
if buckets_count > 0 and len(buckets) != buckets_count:
if buckets_count == 0 or len(buckets) != buckets_count:
print(f"Buckets mismatch in preset: expected {buckets_count}, created {len(buckets)}")
if not ignore_errors:
sys.exit(ERROR_WRONG_CONTAINERS_COUNT)

View file

@ -20,8 +20,7 @@ Scenarios `grpc.js`, `local.js`, `http.js` and `s3.js` support the following opt
* `SELECTION_SIZE` - size of batch to select for deletion (default: 1000).
* `PAYLOAD_TYPE` - type of an object payload ("random" or "text", default: "random").
* `STREAMING` - if set, the payload is generated on the fly and is not read into memory fully.
* `METRIC_TAGS` - custom metrics tags (format `tag1:value1;tag2:value2`).
* `DATE_FORMAT` - custom datetime format: `timeonly` (default), `datetime` or `none`.
* `METRIC_TAGS` - `instance` tag value.
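For instance, a hypothetical `local` run combining several of these options might look like the following (paths and tag values are placeholders):

```shell
$ ./k6 run -e DURATION=60 -e WRITERS=8 -e PAYLOAD_TYPE=text -e STREAMING=1 \
    -e SELECTION_SIZE=500 -e METRIC_TAGS="instance:server1" \
    -e REGISTRY_FILE=registry.bolt -e CONFIG_FILE=/path/to/config.yaml \
    -e PREGEN_JSON=./grpc.json scenarios/local.js
```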
Additionally, the profiling extension can be enabled to generate CPU and memory profiles which can be inspected with `go tool pprof file.prof`:
```shell
@ -72,12 +71,11 @@ $ ./scenarios/preset/preset_grpc.py --size 1024 --containers 1 --out grpc.json -
2. Execute scenario with options:
```shell
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e DELETERS=30 -e DELETE_AGE=10 -e REGISTRY_FILE=registry.bolt -e CONFIG_FILE=/path/to/config.yaml -e CONFIG_DIR=/path/to/dir/ -e PREGEN_JSON=./grpc.json scenarios/local.js
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e DELETERS=30 -e DELETE_AGE=10 -e REGISTRY_FILE=registry.bolt -e CONFIG_FILE=/path/to/config.yaml -e PREGEN_JSON=./grpc.json scenarios/local.js
```
Options (in addition to the common options):
* `CONFIG_FILE` - path to the local configuration file used for the storage node. Only the storage configuration section is used.
* `CONFIG_DIR` - path to the folder with local configuration files used for the storage node.
* `DELETERS` - number of VUs performing delete operations (using deleters requires that options `DELETE_AGE` and `REGISTRY_FILE` are specified as well).
* `DELETE_AGE` - age of an object in seconds before which it cannot be deleted. This parameter can be used to control how many objects remain in the system under load.
* `MAX_TOTAL_SIZE_GB` - if specified, the maximum total payload size (in GB) of the storage engine. If the storage engine is already full, no new objects will be saved.
@ -126,7 +124,7 @@ The tests will use all pre-created buckets for PUT operations and all pre-create
$ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 500 --location load-1-4
```
* '--location' - specify the name of the container policy (from the policy.json file). It's important to run 'aws configure' each time the policy file changes so that the latest policies are picked up.
* '--buckets_versioned' - specify the percentage of versioned buckets from the total number of created buckets. Default is 0
3. Execute scenario with options:
```shell
@ -139,8 +137,6 @@ Options (in addition to the common options):
* `DELETE_AGE` - age of an object in seconds before which it cannot be deleted. This parameter can be used to control how many objects remain in the system under load.
* `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations.
* `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
* `OBJ_NAME_LENGTH` - if specified, object names will be generated as random ASCII strings of the given length.
* `DIR_HEIGHT`, `DIR_WIDTH` - if both are specified, the object name will consist of `DIR_HEIGHT` directories, each of which can have `DIR_WIDTH` subdirectories; for example, for `DIR_HEIGHT = 3, DIR_WIDTH = 100`, object names will look like `/dir{1...100}/dir{1...100}/dir{1...100}/{uuid || OBJ_NAME}` (see the sketch below)
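For illustration, a hypothetical `s3.js` run combining the name-generation options could look like this (endpoint and sizes are placeholders; the directory prefix from `DIR_HEIGHT`/`DIR_WIDTH` is combined with the generated name):

```shell
$ ./k6 run -e DURATION=60 -e WRITERS=8 -e WRITE_OBJ_SIZE=1024 \
    -e DIR_HEIGHT=3 -e DIR_WIDTH=100 -e OBJ_NAME_LENGTH=16 \
    -e PREGEN_JSON=./s3_1024kb.json -e S3_ENDPOINTS=host1:8084 scenarios/s3.js
```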
## S3 Multipart
@ -182,7 +178,7 @@ After this, the `pregen.json` file will contain a `containers` list field the sa
3. Execute the scenario with the desired options. For example:
```shell
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e CONFIG_FILE=/path/to/node/config.yml -e CONFIG_DIR=/path/to/dir/ -e PREGEN_JSON=pregen.json scenarios/s3local.js
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e CONFIG_FILE=/path/to/node/config.yml -e PREGEN_JSON=pregen.json scenarios/s3local.js
```
Note that the `s3local` scenario currently does not support deleters.
@ -206,27 +202,6 @@ K6_PROMETHEUS_RW_TREND_STATS="p(95),p(99),min,max" \
./k6 run ... -o experimental-prometheus-rw -e METRIC_TAGS="instance:server1;run:run1" scenario.js
```
## Grafana annotations
There is no option to export Grafana annotations, but it can easily be done with `curl` and Grafana's annotations API.
Example:
```shell
curl --request POST \
--url https://user:password@grafana.host/api/annotations \
--header 'Content-Type: application/json' \
--data '{
"dashboardUID": "YsVWNpMIk",
"time": 1706533045014,
"timeEnd": 1706533085100,
"tags": [
"tag1",
"tag2"
],
"text": "Test annotation"
}'
```
See [Grafana docs](https://grafana.com/docs/grafana/latest/developers/http_api/annotations/) for details.
## Verify
This scenario verifies that objects created by a previous run are actually stored in the system and that their data is not corrupted. Running it assumes that you have already run a gRPC, HTTP, or S3 scenario with the `REGISTRY_FILE` option.
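For example, assuming the verify scenario ships as `scenarios/verify.js` (the path and the exact option set are assumptions here), a minimal run against an S3 gateway might look like:

```shell
$ ./k6 run -e REGISTRY_FILE=registry.bolt -e S3_ENDPOINTS=host1:8084 scenarios/verify.js
```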

View file

@ -1,59 +1,70 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {newGenerator} from './libs/datagen.js';
import s3 from 'k6/x/frostfs/s3';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
import { newGenerator } from './libs/datagen.js';
parseEnv();
const obj_list = new SharedArray(
'obj_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const bucket_list = new SharedArray(
'bucket_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).buckets; });
const bucket_list = new SharedArray('bucket_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl : no_verify_ssl
}
const no_verify_ssl = __ENV.NO_VERIFY_SSL || "true";
const connection_args = {no_verify_ssl: no_verify_ssl}
// Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const s3_client = s3.connect(s3_endpoint, connection_args);
const log = logging.new().withField('endpoint', s3_endpoint);
const log = logging.new().withField("endpoint", s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
stats.setTags(__ENV.METRIC_TAGS)
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
}
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
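// Only objects in the 'created' status older than READ_AGE seconds are
// offered to readers from the registry.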
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : read_age,
})
obj_to_read_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_read",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: read_age,
}
)
}
const scenarios = {};
@ -61,166 +72,136 @@ const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor : 'constant-vus',
vus : write_vu_count,
duration : `${duration}s`,
exec : 'obj_write',
gracefulStop : '5s',
};
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_exit_on_null = write_vu_count == 0;
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
obj_to_delete_selector =
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : delete_age,
});
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor : 'constant-vus',
vus : read_vu_count,
duration : `${duration}s`,
exec : 'obj_read',
gracefulStop : '5s',
};
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
};
}
const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
if (!obj_to_delete_selector) {
throw 'Positive DELETE worker number without a proper object selector';
}
if (!obj_to_delete_selector) {
throw 'Positive DELETE worker number without a proper object selector';
}
scenarios.delete = {
executor : 'constant-vus',
vus : delete_vu_count,
duration : `${duration}s`,
exec : 'obj_delete',
gracefulStop : '5s',
};
scenarios.delete = {
executor: 'constant-vus',
vus: delete_vu_count,
duration: `${duration}s`,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout : '5s',
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
if (delete_vu_count > 0){
obj_to_delete_selector.sync.add(delete_vu_count)
}
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout' : textSummary(data, {indent : ' ', enableColors : false}),
[summary_json] : JSON.stringify(data),
};
}
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
log.withFields({bucket : bucket, key : key}).error(resp.error);
return;
}
const payload = generator.genPayload();
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
if (obj_registry) {
obj_registry.addObject("", "", bucket, key, payload.hash());
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if(obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key}).error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key})
.error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket : obj.bucket, key : obj.object}).error(resp.error);
}
log.withFields({bucket: obj.bucket, key: obj.object}).error(resp.error);
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
if (obj_to_delete_exit_on_null) {
obj_to_delete_selector.sync.done()
obj_to_delete_selector.sync.wait()
exec.test.abort("No more objects to select");
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
return;
}
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, op : 'DELETE'})
.error(resp.error);
return;
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
obj_registry.deleteObject(obj.id);
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key, op: "DELETE"}).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}

View file

@ -1,69 +1,70 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import s3 from 'k6/x/frostfs/s3';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
import { newGenerator } from './libs/datagen.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const bucket_list = new SharedArray('bucket_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
const bucket_list = new SharedArray('bucket_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl: no_verify_ssl
};
const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const no_verify_ssl = __ENV.NO_VERIFY_SSL || "true";
const connection_args = {no_verify_ssl: no_verify_ssl}
const s3_client = s3.connect(s3_endpoint, connection_args);
const log = logging.new().withField('endpoint', s3_endpoint);
const log = logging.new().withField("endpoint", s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
stats.setTags(__ENV.METRIC_TAGS)
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: delete_age,
});
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
}
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
age: read_age,
})
obj_to_read_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_read",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: read_age,
}
)
}
const scenarios = {};
@ -74,16 +75,16 @@ const max_write_vus = parseInt(__ENV.MAX_WRITERS || pre_alloc_write_vus);
const write_rate = parseInt(__ENV.WRITE_RATE || '0');
const generator = newGenerator(write_rate > 0);
if (write_rate > 0) {
scenarios.write = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_write_vus,
rate: write_rate,
timeUnit: time_unit,
exec: 'obj_write',
gracefulStop: '5s',
};
scenarios.write = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_write_vus,
rate: write_rate,
timeUnit: time_unit,
exec: 'obj_write',
gracefulStop: '5s',
};
}
@ -91,16 +92,16 @@ const pre_alloc_read_vus = parseInt(__ENV.PRE_ALLOC_READERS || '0');
const max_read_vus = parseInt(__ENV.MAX_READERS || pre_alloc_read_vus);
const read_rate = parseInt(__ENV.READ_RATE || '0');
if (read_rate > 0) {
scenarios.read = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_read_vus,
rate: read_rate,
timeUnit: time_unit,
exec: 'obj_read',
gracefulStop: '5s',
};
scenarios.read = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_read_vus,
rate: read_rate,
timeUnit: time_unit,
exec: 'obj_read',
gracefulStop: '5s',
};
}
@ -108,132 +109,126 @@ const pre_alloc_delete_vus = parseInt(__ENV.PRE_ALLOC_DELETERS || '0');
const max_delete_vus = parseInt(__ENV.MAX_DELETERS || pre_alloc_write_vus);
const delete_rate = parseInt(__ENV.DELETE_RATE || '0');
if (delete_rate > 0) {
if (!obj_to_delete_selector) {
throw new Error(
'Positive DELETE worker number without a proper object selector');
}
if (!obj_to_delete_selector) {
throw new Error('Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_delete_vus,
maxVUs: max_delete_vus,
rate: delete_rate,
timeUnit: time_unit,
exec: 'obj_delete',
gracefulStop: '5s',
};
scenarios.delete = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_delete_vus,
maxVUs: max_delete_vus,
rate: delete_rate,
timeUnit: time_unit,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_pre_allocated_vu_count =
pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus
const total_pre_allocated_vu_count = pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Pre allocated reading VUs: ${pre_alloc_read_vus}`);
console.log(`Pre allocated writing VUs: ${pre_alloc_write_vus}`);
console.log(`Pre allocated deleting VUs: ${pre_alloc_delete_vus}`);
console.log(`Total pre allocated VUs: ${total_pre_allocated_vu_count}`);
console.log(`Max reading VUs: ${max_read_vus}`);
console.log(`Max writing VUs: ${max_write_vus}`);
console.log(`Max deleting VUs: ${max_delete_vus}`);
console.log(`Total max VUs: ${total_max_vu_count}`);
console.log(`Time unit: ${time_unit}`);
console.log(`Read rate: ${read_rate}`);
console.log(`Writing rate: ${write_rate}`);
console.log(`Delete rate: ${delete_rate}`);
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Pre allocated reading VUs: ${pre_alloc_read_vus}`);
console.log(`Pre allocated writing VUs: ${pre_alloc_write_vus}`);
console.log(`Pre allocated deleting VUs: ${pre_alloc_delete_vus}`);
console.log(`Total pre allocated VUs: ${total_pre_allocated_vu_count}`);
console.log(`Max reading VUs: ${max_read_vus}`);
console.log(`Max writing VUs: ${max_write_vus}`);
console.log(`Max deleting VUs: ${max_delete_vus}`);
console.log(`Total max VUs: ${total_max_vu_count}`);
console.log(`Time unit: ${time_unit}`);
console.log(`Read rate: ${read_rate}`);
console.log(`Writing rate: ${write_rate}`);
console.log(`Delete rate: ${delete_rate}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
const start_timestamp = Date.now()
console.log(`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
[summary_json]: JSON.stringify(data),
};
}
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
const payload = generator.genPayload();
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
if (obj_registry) {
obj_registry.addObject("", "", bucket, key, payload.hash());
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if(obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key}).error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key})
.error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket: obj.bucket, key: obj.object}).error(resp.error);
}
log.withFields({bucket: obj.bucket, key: obj.object}).error(resp.error);
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key, op: 'DELETE'})
.error(resp.error);
return;
}
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key, op: "DELETE"}).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
obj_registry.deleteObject(obj.id);
}

View file

@ -1,233 +0,0 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
parseEnv();
const obj_list = new SharedArray(
'obj_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
const bucket_list = new SharedArray(
'bucket_list',
function() { return JSON.parse(open(__ENV.PREGEN_JSON)).buckets; });
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl : no_verify_ssl
}
// Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const s3_client = s3.connect(s3_endpoint, connection_args);
const log = logging.new().withField('endpoint', s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
const read_age = __ENV.READ_AGE ? parseInt(__ENV.READ_AGE) : 10;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'created',
age : read_age,
})
}
const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor : 'constant-vus',
vus : write_vu_count,
duration : `${duration}s`,
exec : 'obj_write',
gracefulStop : '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor : 'constant-vus',
vus : read_vu_count,
duration : `${duration}s`,
exec : 'obj_read',
gracefulStop : '5s',
};
}
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled ) {
obj_to_delete_exit_on_null = (write_vu_count == 0) && (read_vu_count == 0)
let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
: registry.getSelector;
obj_to_delete_selector =
constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status : 'read',
age : delete_age,
});
}
const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
if (!obj_to_delete_selector) {
throw 'Positive DELETE worker number without a proper object selector';
}
scenarios.delete = {
executor : 'constant-vus',
vus : delete_vu_count,
duration : `${duration}s`,
exec : 'obj_delete',
gracefulStop : '5s',
};
}
export const options = {
scenarios,
setupTimeout : '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(
`Load started at: ${Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(
`Load finished at: ${Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout' : textSummary(data, {indent : ' ', enableColors : false}),
[summary_json] : JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
log.withFields({bucket : bucket, key : key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj ) {
return;
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, status: obj.status, op: `READ`})
.error(resp.error);
} else {
obj_registry.setObjectStatus(obj.id, obj.status, 'read');
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket : obj.bucket, key : obj.object}).error(resp.error);
} else {
obj_registry.setObjectStatus(obj.id, obj.status, 'read');
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
delete_object(obj)
}
export function delete_object(obj) {
if (!obj) {
if (obj_to_delete_exit_on_null) {
exec.test.abort("No more objects to select");
}
return;
}
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) {
log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, op : 'DELETE'})
.error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}

View file

@ -1,119 +1,110 @@
import {sleep} from 'k6';
import {SharedArray} from 'k6/data';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import s3 from 'k6/x/frostfs/s3';
import {SharedArray} from 'k6/data';
import {sleep} from 'k6';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {parseEnv} from './libs/env-parser.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
import { newGenerator } from './libs/datagen.js';
parseEnv();
const bucket_list = new SharedArray('bucket_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
const bucket_list = new SharedArray('bucket_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
});
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = {
no_verify_ssl: no_verify_ssl
};
const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const no_verify_ssl = __ENV.NO_VERIFY_SSL || "true";
const connection_args = {no_verify_ssl: no_verify_ssl}
const s3_client = s3.connect(s3_endpoint, connection_args);
const log = logging.new().withField('endpoint', s3_endpoint);
const log = logging.new().withField("endpoint", s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
stats.setTags(__ENV.METRIC_TAGS)
}
const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
if (write_vu_count < 1) {
throw 'number of VUs (env WRITERS) performing write operations should be greater than 0';
throw 'number of VUs (env WRITERS) performing write operations should be greater than 0';
}
const write_multipart_vu_count = parseInt(__ENV.WRITERS_MULTIPART || '0');
if (write_multipart_vu_count < 1) {
throw 'number of parts (env WRITERS_MULTIPART) to upload in parallel should be greater than 0';
throw 'number of parts (env WRITERS_MULTIPART) to upload in parallel should be greater than 0';
}
const generator =
newGenerator(write_vu_count > 0 || write_multipart_vu_count > 0);
const generator = newGenerator(write_vu_count > 0 || write_multipart_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write_multipart = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write_multipart',
gracefulStop: '5s',
};
scenarios.write_multipart = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write_multipart',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_vu_count = write_vu_count * write_multipart_vu_count;
const total_vu_count = write_vu_count * write_multipart_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Writing multipart VUs: ${write_multipart_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Writing multipart VUs: ${write_multipart_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
if (obj_registry) {
obj_registry.close();
}
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
[summary_json]: JSON.stringify(data),
};
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
[summary_json]: JSON.stringify(data),
};
}
const write_multipart_part_size =
1024 * parseInt(__ENV.WRITE_OBJ_PART_SIZE || '0')
const write_multipart_part_size = 1024 * parseInt(__ENV.WRITE_OBJ_PART_SIZE || '0')
if (write_multipart_part_size < 5 * 1024 * 1024) {
throw 'part size (env WRITE_OBJ_PART_SIZE * 1024) must be at least 5 MB';
throw 'part size (env WRITE_OBJ_PART_SIZE * 1024) must be at least 5 MB';
}
export function obj_write_multipart() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const payload = generator.genPayload();
const resp = s3_client.multipart(
bucket, key, write_multipart_part_size, write_multipart_vu_count,
payload);
if (!resp.success) {
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
const payload = generator.genPayload();
const resp = s3_client.multipart(bucket, key, write_multipart_part_size, write_multipart_vu_count, payload);
if (!resp.success) {
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
if (obj_registry) {
obj_registry.addObject("", "", bucket, key, payload.hash());
}
}

View file

@ -1,71 +1,67 @@
import {SharedArray} from 'k6/data';
import exec from 'k6/execution';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3local from 'k6/x/frostfs/s3local';
import stats from 'k6/x/frostfs/stats';
import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
import s3local from 'k6/x/frostfs/s3local';
import { SharedArray } from 'k6/data';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
import exec from 'k6/execution';
import { newGenerator } from './libs/datagen.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray('container_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
const bucket_list = new SharedArray('bucket_list', function() {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
const bucket_list = new SharedArray('bucket_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
});
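// bucket_mapping pairs each pregenerated bucket with the container that backs
// it, so the s3local client can resolve bucket names to container IDs.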
function bucket_mapping() {
if (container_list.length != bucket_list.length) {
throw 'The number of containers and buckets in the preset file must be the same.';
}
let mapping = {};
for (let i = 0; i < container_list.length; ++i) {
mapping[bucket_list[i]] = container_list[i];
}
return mapping;
if (container_list.length != bucket_list.length) {
throw 'The number of containers and buckets in the preset file must be the same.';
}
let mapping = {};
for (let i = 0; i < container_list.length; ++i) {
mapping[bucket_list[i]] = container_list[i];
}
return mapping;
}
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
const config_file = __ENV.CONFIG_FILE;
const config_dir = __ENV.CONFIG_DIR;
const max_total_size_gb =
__ENV.MAX_TOTAL_SIZE_GB ? parseInt(__ENV.MAX_TOTAL_SIZE_GB) : 0;
const s3_client = s3local.connect(
config_file, config_dir, {
'debug_logger': __ENV.DEBUG_LOGGER || 'false',
},
bucket_mapping(), max_total_size_gb);
const log = logging.new().withFields(
{'config_file': config_file, 'config_dir': config_dir});
const max_total_size_gb = __ENV.MAX_TOTAL_SIZE_GB ? parseInt(__ENV.MAX_TOTAL_SIZE_GB) : 0;
const s3_client = s3local.connect(config_file, config_dir, {
'debug_logger': __ENV.DEBUG_LOGGER || 'false',
}, bucket_mapping(), max_total_size_gb);
const log = logging.new().withFields({"config_file": config_file,"config_dir": config_dir});
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
stats.setTags(__ENV.METRIC_TAGS)
}
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry =
registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
let obj_to_read_selector = undefined;
if (registry_enabled) {
obj_to_read_selector = registry.getLoopedSelector(
__ENV.REGISTRY_FILE, 'obj_to_read',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
})
obj_to_read_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_read",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
}
)
}
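Both selector flavors iterate registry objects filtered by status; judging by the names and by the falsy check in obj_read below, a looped selector restarts from the beginning once exhausted, while a plain selector simply stops returning objects. A minimal consumption sketch under that assumption:

// Works with either selector flavor; returns undefined once a plain
// selector is drained (a looped one keeps producing objects).
function next_read_target(selector) {
const obj = selector.nextObject();
if (!obj) {
return undefined;
}
// Registry entries carry S3 coordinates under s3_bucket/s3_key.
return {bucket: obj.s3_bucket, key: obj.s3_key};
}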
const duration = __ENV.DURATION;
@ -75,99 +71,96 @@ const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
const generator = newGenerator(write_vu_count > 0);
if (write_vu_count > 0) {
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
};
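For example, with the hypothetical invocation WRITERS=2 READERS=3 DURATION=60, the options above resolve to two constant-vus scenarios:

// options.scenarios as it would resolve (hypothetical env values):
// write: {executor: 'constant-vus', vus: 2, duration: '60s', exec: 'obj_write', gracefulStop: '5s'}
// read: {executor: 'constant-vus', vus: 3, duration: '60s', exec: 'obj_read', gracefulStop: '5s'}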
export function setup() {
const total_vu_count = write_vu_count + read_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
const start_timestamp = Date.now()
console.log(`Load started at: ${new Date(start_timestamp).toString()}`)
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
const end_timestamp = Date.now()
console.log(`Load finished at: ${new Date(end_timestamp).toString()}`)
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, {indent: ' ', enableColors: false}),
[summary_json]: JSON.stringify(data),
};
}
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const key = __ENV.OBJ_NAME || uuidv4();
const payload = generator.genPayload();
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
if (resp.abort) {
exec.test.abort(resp.error);
}
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject('', '', bucket, key, payload.hash());
}
if (obj_registry) {
obj_registry.addObject("", "", bucket, key, payload.hash());
}
}
export function obj_read() {
if (obj_to_read_selector) {
const obj = obj_to_read_selector.nextObject();
if (!obj) {
return;
}
const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key}).error(resp.error);
}
return
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket: obj.bucket, key: obj.object}).error(resp.error);
}
}
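Note the two record shapes obj_read handles: pregenerated obj_list entries expose bucket/object, while registry selector entries expose s3_bucket/s3_key. Hypothetical examples of each:

// Pregenerated preset entry (read via obj.bucket / obj.object):
// {"bucket": "buck-a", "object": "obj-0001"}
// Registry entry (read via obj.s3_bucket / obj.s3_key; other fields as used by the verify scenario):
// {"id": 1, "status": "created", "s3_bucket": "buck-a", "s3_key": "obj-0001", "payload_hash": "..."}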


@ -1,21 +1,20 @@
import { sleep } from 'k6';
import { Counter } from 'k6/metrics';
import logging from 'k6/x/frostfs/logging';
import native from 'k6/x/frostfs/native';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';
import { parseEnv } from './libs/env-parser.js';
import s3 from 'k6/x/frostfs/s3';
import logging from 'k6/x/frostfs/logging';
import { sleep } from 'k6';
import { Counter } from 'k6/metrics';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
parseEnv();
const obj_registry = registry.open(__ENV.REGISTRY_FILE);
// Time limit (in seconds) for the run
const time_limit = __ENV.TIME_LIMIT || '60';
const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
const time_limit = __ENV.TIME_LIMIT || "60";
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Number of objects in each status. These counters are cumulative in the
// sense that they reflect the total number of objects in the registry, not just
@ -23,147 +22,144 @@ const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
// This allows running this scenario multiple times and collecting overall
// statistics in the final run.
const obj_counters = {
verified: new Counter('verified_obj'),
skipped: new Counter('skipped_obj'),
invalid: new Counter('invalid_obj'),
};
let log = logging.new();
if (!!__ENV.METRIC_TAGS) {
stats.setTags(__ENV.METRIC_TAGS)
}
// Connect to random gRPC endpoint
let grpc_client = undefined;
if (__ENV.GRPC_ENDPOINTS) {
const grpcEndpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpcEndpoint =
grpcEndpoints[Math.floor(Math.random() * grpcEndpoints.length)];
log = log.withField('endpoint', grpcEndpoint);
grpc_client = native.connect(
grpcEndpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 0,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 0,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
const grpcEndpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpcEndpoint = grpcEndpoints[Math.floor(Math.random() * grpcEndpoints.length)];
log = log.withField("endpoint", grpcEndpoint);
grpc_client = native.connect(grpcEndpoint, '',
__ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 0,
__ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 0,
__ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === "true" : false, '');
}
// Connect to random S3 endpoint
let s3_client = undefined;
if (__ENV.S3_ENDPOINTS) {
const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
const connection_args = { no_verify_ssl: no_verify_ssl };
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint =
s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
log = log.withField('endpoint', s3_endpoint);
s3_client = s3.connect(s3_endpoint, connection_args);
const no_verify_ssl = __ENV.NO_VERIFY_SSL || "true";
const connection_args = {no_verify_ssl: no_verify_ssl}
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
log = log.withField("endpoint", s3_endpoint);
s3_client = s3.connect(s3_endpoint, connection_args);
}
// We will attempt to verify every object in "created" status. The scenario will execute
// as many iterations as there are objects. Each object gets up to 3 verification attempts.
const obj_to_verify_selector = registry.getSelector(
__ENV.REGISTRY_FILE, 'obj_to_verify',
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
status: 'created',
});
__ENV.REGISTRY_FILE,
"obj_to_verify",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
}
);
const obj_to_verify_count = obj_to_verify_selector.count();
// Execute at least one iteration (executor shared-iterations can't run 0 iterations)
const iterations = Math.max(1, obj_to_verify_count);
// Executor shared-iterations requires the number of iterations to be larger than the number of VUs
const vus = Math.min(__ENV.CLIENTS, iterations);
const scenarios = {
verify: {
executor: 'shared-iterations',
vus,
iterations,
maxDuration: `${time_limit}s`,
exec: 'obj_verify',
gracefulStop: '5s',
}
};
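To make the sizing concrete: with a hypothetical registry holding 10 objects in "created" status and CLIENTS=16, the run performs 10 shared iterations across 10 VUs, since shared-iterations cannot use more VUs than iterations:

// Hypothetical sizing, mirroring the expressions above:
const example_to_verify = 10; // obj_to_verify_selector.count()
const example_iterations = Math.max(1, example_to_verify); // at least one iteration
const example_vus = Math.min(16, example_iterations); // CLIENTS=16 capped to 10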
export const options = {
scenarios,
setupTimeout: '5s',
};
export function setup() {
// Populate counters with initial values
for (const [status, counter] of Object.entries(obj_counters)) {
const obj_selector = registry.getSelector(
__ENV.REGISTRY_FILE, status,
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, { status });
counter.add(obj_selector.count());
}
// Populate counters with initial values
for (const [status, counter] of Object.entries(obj_counters)) {
const obj_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
status,
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{ status });
counter.add(obj_selector.count());
}
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_verify() {
if (obj_to_verify_count == 0) {
log.info('Nothing to verify');
return;
}
if (obj_to_verify_count == 0) {
log.info("Nothing to verify");
return;
}
if (__ENV.SLEEP) {
sleep(__ENV.SLEEP);
}
const obj = obj_to_verify_selector.nextObject();
if (!obj) {
log.info('All objects have been verified');
return;
}
const obj = obj_to_verify_selector.nextObject();
if (!obj) {
log.info("All objects have been verified");
return;
}
const obj_status = verify_object_with_retries(obj, 3);
obj_counters[obj_status].add(1);
obj_registry.setObjectStatus(obj.id, obj.status, obj_status);
}
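Each iteration thus moves one registry object from "created" to a terminal status; the transitions used by this script are:

// created --verify--> verified | invalid | skipped
// setObjectStatus(id, old_status, new_status) records the transition, and the
// matching Counter surfaces the running totals in the k6 summary.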
function verify_object_with_retries(obj, attempts) {
for (let i = 0; i < attempts; i++) {
let result;
// A different name is required; reassigning `log` here would throw
// "ReferenceError: Cannot access a variable before initialization".
let lg = log;
if (obj.c_id && obj.o_id) {
lg = lg.withFields({ cid: obj.c_id, oid: obj.o_id });
result = grpc_client.verifyHash(obj.c_id, obj.o_id, obj.payload_hash);
} else if (obj.s3_bucket && obj.s3_key) {
lg = lg.withFields({ bucket: obj.s3_bucket, key: obj.s3_key });
result =
s3_client.verifyHash(obj.s3_bucket, obj.s3_key, obj.payload_hash);
} else {
lg.withFields({
cid: obj.c_id,
oid: obj.o_id,
bucket: obj.s3_bucket,
key: obj.s3_key
}).warn(`Object cannot be verified with supported protocols`);
return 'skipped';
for (let i = 0; i < attempts; i++) {
let result;
// A different name is required; reassigning `log` here would throw
// "ReferenceError: Cannot access a variable before initialization".
let lg = log;
if (obj.c_id && obj.o_id) {
lg = lg.withFields({cid: obj.c_id, oid: obj.o_id});
result = grpc_client.verifyHash(obj.c_id, obj.o_id, obj.payload_hash);
} else if (obj.s3_bucket && obj.s3_key) {
lg = lg.withFields({bucket: obj.s3_bucket, key: obj.s3_key});
result = s3_client.verifyHash(obj.s3_bucket, obj.s3_key, obj.payload_hash);
} else {
lg.withFields({
cid: obj.c_id,
oid: obj.o_id,
bucket: obj.s3_bucket,
key: obj.s3_key
}).warn(`Object cannot be verified with supported protocols`);
return "skipped";
}
if (result.success) {
return "verified";
} else if (result.error == "hash mismatch") {
return "invalid";
}
// Unless we explicitly saw a hash mismatch, we retry after a delay
lg.error(`Verify error: ${result.error}. Object will be re-tried`);
sleep(__ENV.SLEEP);
}
if (result.success) {
return 'verified';
} else if (result.error == 'hash mismatch') {
return 'invalid';
}
// Unless we explicitly saw a hash mismatch, we
// retry after a delay
lg.error(`Verify error: ${result.error}. Object will be re-tried`);
sleep(__ENV.SLEEP);
}
return 'invalid';
return "invalid";
}
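A condensed sketch of the retry contract implemented above, with the verifier passed in as a plain function for illustration:

// Not part of the scenario; summarizes the loop above.
function retry_verify(verify, attempts) {
for (let i = 0; i < attempts; i++) {
const result = verify();
if (result.success) return 'verified';
if (result.error == 'hash mismatch') return 'invalid'; // definitive, never retried
// Any other error is treated as transient and retried after a delay.
}
return 'invalid'; // attempts exhausted
}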