Compare commits

...

38 commits

Author SHA1 Message Date
ebbc5bc0a7 [#192] s3: Fix deprecated endpoint resolver
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-01-13 17:16:40 +03:00
ddd86d1f23 [#191] git: Ignore go.sum in diff
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2025-01-13 15:33:07 +03:00
f4b43b264f [#191] scenarios: Bump version for node, sdk-go, s3-gw and neo-go
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2025-01-13 15:33:01 +03:00
8f9c02253c
[#188] logging: Allow to print date in log messages
`timeonly`:
```
INFO[14:05:21] kek                                           endpoint=1
```

`datetime`:
```
INFO[2025-01-10 14:03:58] kek                                           endpoint=1
```

`none`:
```
INFO kek                                           endpoint=1
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-01-10 14:42:56 +03:00
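For context, the format is selected through the `DATE_FORMAT` environment variable read at module init (see the logging module diff below); a sketch of an invocation, assuming a locally built k6 binary with this extension (the scenario path is illustrative):
```shell
DATE_FORMAT=datetime ./k6 run scenarios/local.js
```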
829777bd53
[#190] scenarios: Fix missing map file warning
```
WARN[13:55:48] Couldn't load source map for file:///repo/frostfs/k6/scenarios/libs/k6-utils-1.4.0.js  error="The moduleSpecifier \"file:///repo/frostfs/k6/scenarios/libs/index.js.map\" couldn't be found on local disk. Make sure that you've specified the right path to the file. If you're running k6 using the Docker image make sure you have mounted the local directory (-v /local/path/:/inside/docker/path) containing your script and modules so that they're accessible by k6 from inside of the container, see https://k6.io/docs/using-k6/modules#using-local-modules-with-docker."
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-01-10 14:42:55 +03:00
3ebb3dda0a
[#190] logging: Use time format constants from stdlib
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-01-10 14:10:08 +03:00
3c6023ca29 [#114] Fix examples
With #114, upload data functions now work with
datagen payloads only, so the examples have to be updated.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-12-28 13:57:54 +03:00
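The updated pattern, as the example diffs further down show: payloads now come from a `datagen` generator instead of `open('../go.sum', 'b')`. A minimal sketch:
```js
import datagen from 'k6/x/frostfs/datagen';

// arguments as used in the updated examples: size in bytes, payload type, streaming flag
const generator = datagen.generator(1024, "random", false);

export default function () {
    const payload = generator.genPayload(); // pass to put() as before
}
```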
a326fbcbf8 [#181] Support AWS credentials profiles
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2024-12-28 12:52:27 +03:00
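Per the README change in this commit, the profile is selected at connect time ('metal' is just the profile name from the README example):
```js
import s3 from 'k6/x/frostfs/s3';

// credentials are taken from the 'metal' profile in $HOME/.aws/credentials
const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'aws_profile': 'metal'});
```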
14f26e47dc [#185] native: Issue session token on the previous epoch
Consider two nodes, A and B. Because of a race condition, A has epoch N
and B still has epoch N-1. Creating a session token and putting an object
on node A will set the issuing epoch to N, thus failing validation on
node B.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-19 12:37:24 +00:00
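A minimal sketch of the fix (the actual change is in the native module diff below): the token is backdated by one epoch, so a node still at epoch N-1 accepts it.
```go
package native

import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"

// issueAtPreviousEpoch backdates the session token: a node still at
// epoch N-1 would reject a token issued at epoch N, so issue it at N-1.
func issueAtPreviousEpoch(tok *session.Object, currentEpoch uint64) {
	prevEpoch := currentEpoch - 1
	tok.SetNbf(prevEpoch) // not-valid-before
	tok.SetIat(prevEpoch) // issued-at
}
```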
296971e57c [#183] Refine CODEOWNERS settings
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-12-10 16:21:48 +03:00
76fd5c9706 [#180] preset_grpc: Add ability to attach rule for created container
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-11-15 13:55:30 +03:00
f0cbf9c301 [#180] preset_grpc: Remove deprecated parameter acl
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-11-14 15:35:13 +03:00
124397578d
[#100] preset_s3: Add a flag for percent of versioned buckets
Add the "--buckets_versioned" flag. Default is 0 (no versioned buckets).

Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-11-12 18:21:29 +03:00
a7079cda60
[#100] s3: Add a flag for permanent versioned object deletion
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-11-12 18:21:28 +03:00
d3d5a1baed
[#100] s3: Support creating of versioned bucket
Add "versioning" parameter handling in the CreateBucket method.

Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-11-12 18:21:28 +03:00
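Judging by the commit message, the parameter goes through `createBucket`'s params dictionary; a hypothetical call (the string-valued style follows the other params in the examples):
```js
import s3 from 'k6/x/frostfs/s3';

const s3_cli = s3.connect("https://s3.frostfs.devenv:8080");
// 'versioning' is the parameter added by this commit
const res = s3_cli.createBucket("cats", {versioning: 'true'});
```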
72d24b04a3 [#179] Remove mentions of GitHub from contributor docs
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-11-06 15:41:48 +03:00
f5df03c718
[#173] s3: Fix missing import
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-10-28 19:06:57 +03:00
1c7a3b3b6c
[#173] s3: Support variable key length
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-10-28 18:57:51 +03:00
e0cbc3b763
[#124] s3: Allow to specify directory height and width
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-10-25 12:20:15 +03:00
54f99dac1d [#172] Update README.md
Add info about `xk6-registry import` to README.md.

Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-10-08 20:27:17 +03:00
591f8af161 [#172] cli: Fix registry importer usage description
The `status` flag is currently unsupported by `xk6-registry import`.

Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-10-08 20:27:01 +03:00
c2b8944af6 [#171] Makefile: add target to install golangci-lint
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-10-02 16:48:45 +03:00
a47bf149d8 [#161] go.mod: Bump go version to 1.22
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-10-02 12:24:19 +00:00
bcbd0db25f [#169] Fix broken logo link in README
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-09-13 15:17:51 +03:00
17bbbe53e6 [#168] Update obsolete URLs
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2024-09-11 14:10:53 +03:00
bede693470 [#153] selector: Add VU synchronization in 'Oneshot' mode
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2024-09-10 11:56:40 +03:00
f539da7d89 [#166] preset: Add missing container_creation_retry parameter
Signed-off-by: Liza <e.chichindaeva@yadro.com>
2024-08-30 16:39:58 +03:00
6d3ecb6528 [#154] Add registry import cli utility
* Currently, objects created in preset are never deleted: k6 deletes
  only objects from the registry, and if no registry file is provided,
  the k6 delete load fails.
* Added a CLI utility to import objects created in preset into the
  registry so that k6 can delete them normally.

Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-08-23 13:41:01 +03:00
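Usage, per the README section added later in this diff:
```shell
# record preset-created objects in the registry so the k6 delete load can find them
./bin/frostfs-xk6-registry import registry.bolt preset.json
```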
75f670b392 [#159] preset: Add optional max number of retries to create a container instead of hard-coded number 20
Signed-off-by: s.makhov <s.makhov@yadro.com>
2024-08-02 12:22:52 +03:00
9b9db46a07 [#152] Allow to set mix of policies for containers and buckets
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-07-02 20:45:31 +03:00
335c45c578 [#149] selector: Add read timeout
If there are no objects for 10 seconds, return nil. This is
required to prevent a VU iteration hang when no objects have
been pushed to the registry.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-06-24 13:59:12 +03:00
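A hypothetical sketch of the timeout, assuming objects are delivered over a channel (the real selector lives in the registry package and may differ):
```go
package registry

import "time"

type ObjectInfo struct{ ID string }

type ObjSelector struct{ objCh chan *ObjectInfo }

// nextWithTimeout waits up to 10 seconds for an object, then gives up and
// returns nil so the VU iteration can finish instead of hanging forever.
func (s *ObjSelector) nextWithTimeout() *ObjectInfo {
	select {
	case obj := <-s.objCh:
		return obj
	case <-time.After(10 * time.Second):
		return nil
	}
}
```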
e7d4dd404a [#150] scenarios: Use SelectorAwaiting for read and delete load in s3_dar.js, make delete_age optional
Signed-off-by: m.malygina <m.malygina@yadro.com>
2024-06-21 15:52:57 +03:00
0a9aeab47c [#150] In case we are running both read and delete load SelectorAwaiting
Signed-off-by: m.malygina <m.malygina@yadro.com>
2024-06-21 10:55:18 +03:00
3bc1229062 [#146] native: Add NetworkInfo cache
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-06-17 09:32:20 +03:00
e92ce668a8 [#145] scenarios: Format js files with clang
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-06-14 11:26:01 +03:00
6d1e7eb49e [#145] native: Allow to specify max_obj_size
For locally prepared objects it is now possible to
specify the cut size.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-06-14 10:57:30 +03:00
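With the updated `connect` signature (see the README diff below), the cut size is the sixth argument; 0 keeps the network's default:
```js
import native from 'k6/x/frostfs/native';

// prepare objects locally (fifth argument) and split them into 4 MiB chunks
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, true, 4 * 1024 * 1024);
```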
f90a645594 [#144] registry: Add tests for obj registry exporter
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-06-11 18:09:20 +03:00
3f67606f02 [#144] registry: Fix string generator func in obj info test
`rune` is an alias for `int32`. `randString()` used `rand.Int()`,
which returns a system-specific non-negative integer; on 64-bit
systems it is an `int64`. Casting `int64` to `int32` (`rune`)
yields a negative number in case of overflow, which caused the
resulting string to contain unexpected symbols.

Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-06-11 17:43:15 +03:00
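A self-contained illustration of the overflow class described above:
```go
package main

import "fmt"

func main() {
	// rand.Int() may return values above MaxInt32 on 64-bit systems;
	// converting such a value to rune (int32) wraps around to a negative
	// code point, which renders as an unexpected symbol.
	n := int64(3_000_000_000)
	r := rune(n)
	fmt.Println(r, r < 0) // prints a negative rune and "true"
}
```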
52 changed files with 1097 additions and 396 deletions

@@ -1 +0,0 @@
-* @TrueCloudLab/storage-core @TrueCloudLab/storage-services

@@ -13,7 +13,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.21'
+          go-version: '1.22'
       - name: Run commit format checker
         uses: https://git.frostfs.info/TrueCloudLab/dco-go@v2

@@ -11,20 +11,21 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.22'
+          go-version: '1.23'
           cache: true
-      - name: golangci-lint
-        uses: https://github.com/golangci/golangci-lint-action@v3
-        with:
-          version: latest
+      - name: Install linters
+        run: make lint-install
+
+      - name: Run linters
+        run: make lint
   tests:
     name: Tests
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.21', '1.22' ]
+        go_versions: [ '1.22', '1.23' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3
@@ -47,7 +48,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.21'
+          go-version: '1.22'
           cache: true
       - name: Run tests

.gitattributes (new file)
@@ -0,0 +1 @@
+/go.sum -diff

CODEOWNERS (new file)
@@ -0,0 +1,3 @@
+.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers @TrueCloudLab/storage-services-committers @TrueCloudLab/storage-services-developers
+.forgejo/.* @potyarkin
+Makefile @potyarkin

@@ -3,8 +3,8 @@
 First, thank you for contributing! We love and encourage pull requests from
 everyone. Please follow the guidelines:
-- Check the open [issues](https://github.com/TrueCloudLab/xk6-frostfs/issues) and
-  [pull requests](https://github.com/TrueCloudLab/xk6-frostfs/pulls) for existing
+- Check the open [issues](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/issues) and
+  [pull requests](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/pulls) for existing
   discussions.
 - Open an issue first, to discuss a new feature or enhancement.
@@ -27,19 +27,20 @@ Start by forking the `xk6-frostfs` repository, make changes in a branch and then
 send a pull request. We encourage pull requests to discuss code changes. Here
 are the steps in details:
-### Set up your GitHub Repository
-Fork [xk6-frostfs upstream](https://github.com/TrueCloudLab/xk6-frostfs/fork) source
+### Set up your repository
+
+Fork [xk6-frostfs upstream](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/fork) source
 repository to your own personal repository. Copy the URL of your fork (you will
 need it for the `git clone` command below).
 ```sh
-$ git clone https://github.com/TrueCloudLab/xk6-frostfs
+$ git clone https://git.frostfs.info/TrueCloudLab/xk6-frostfs
 ```
 ### Set up git remote as ``upstream``
 ```sh
 $ cd xk6-frostfs
-$ git remote add upstream https://github.com/TrueCloudLab/xk6-frostfs
+$ git remote add upstream https://git.frostfs.info/TrueCloudLab/xk6-frostfs
 $ git fetch upstream
 $ git merge upstream/master
 ...
@@ -89,7 +90,7 @@ $ git push origin feature/123-something_awesome
 ```
 ### Create a Pull Request
-Pull requests can be created via GitHub. Refer to [this
+Pull requests can be created via git.frostfs.info. Refer to [this
 document](https://help.github.com/articles/creating-a-pull-request/) for
 detailed steps on how to create a pull request. After a Pull Request gets peer
 reviewed and approved, it will be merged.

@@ -3,10 +3,15 @@
 # Common variables
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-GO_VERSION ?= 1.19
-LINT_VERSION ?= 1.49.0
+GO_VERSION ?= 1.22
+LINT_VERSION ?= 1.60.3
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
 BINDIR = bin
+OUTPUT_LINT_DIR ?= $(abspath $(BINDIR))/linters
+LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
+TMP_DIR := .cache
 # Binaries to build
 CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
 BINS = $(addprefix $(BINDIR)/, $(CMDS))
@@ -64,7 +69,22 @@ format:
 # Run linters
 lint:
-	@golangci-lint --timeout=5m run
+	@if [ ! -d "$(LINT_DIR)" ]; then \
+		make lint-install; \
+	fi
+	$(LINT_DIR)/golangci-lint run --timeout=5m
+
+# Install linters
+lint-install:
+	@rm -rf $(OUTPUT_LINT_DIR)
+	@mkdir -p $(OUTPUT_LINT_DIR)
+	@mkdir -p $(TMP_DIR)
+	@rm -rf $(TMP_DIR)/linters
+	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
+	@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
+	@rm -rf $(TMP_DIR)/linters
+	@rmdir $(TMP_DIR) 2>/dev/null || true
+	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
 # Run linters in Docker
 docker/lint:

@@ -1,5 +1,5 @@
 <p align="center">
-  <img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
+  <img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
 </p>
 <p align="center">
   <a href="https://go.k6.io/k6">k6</a> extension to test and benchmark FrostFS related protocols.
@@ -48,15 +48,16 @@ Create native client with `connect` method. Arguments:
 - dial timeout in seconds (0 for the default value)
 - stream timeout in seconds (0 for the default value)
 - generate object header on the client side (for big object - split locally too)
+- max size for generated object header on the client side (for big object - the size that the object is split into)
 ```js
 import native from 'k6/x/frostfs/native';
-const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false)
+const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
 ```
 ### Methods
 - `putContainer(params)`. The `params` is a dictionary (e.g.
-  `{acl:'public-read-write',placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
+  `{placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
   Returns dictionary with `success`
   boolean flag, `container_id` string, and `error` string.
 - `setBufferSize(size)`. Sets internal buffer size for data upload and
@@ -106,11 +107,12 @@ const s3_cli = s3.connect("https://s3.frostfs.devenv:8080")
 You can also provide additional options:
 ```js
 import s3 from 'k6/x/frostfs/s3';
-const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s'})
+const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s', 'aws_profile': 'metal'})
 ```
 * `no_verify_ssl` - Bool. If `true` - skip verifying the s3 certificate chain and host name (useful if s3 uses self-signed certificates)
 * `timeout` - Duration. Set timeout for requests (in http client). If omitted or zero - timeout is infinite.
+* `aws_profile` - String. Use custom profile credentials from `$HOME/.aws/credentials` file. If omitted or empty - use default profile.
 ### Methods
 - `createBucket(bucket, params)`. Returns dictionary with `success` boolean flag
@@ -185,6 +187,25 @@ Flags:
   -v, --version   version for registry-exporter
 ```
+## Import pregen into registry db
+
+You can import pregenerated json files into registry bolt db. Use `frostfs-xk6-registry import`. Usage examples are in help:
+
+```shell
+$ ./bin/frostfs-xk6-registry import -h
+Import objects into registry from pregenerated files
+
+Usage:
+  xk6-registry import [flags]
+
+Examples:
+xk6-registry import registry.bolt preset.json
+xk6-registry import registry.bolt preset.json another_preset.json
+
+Flags:
+  -h, --help   help for import
+```
+
 # License
 - [GNU General Public License v3.0](LICENSE)

@@ -0,0 +1,55 @@
+package importer
+
+import (
+	"encoding/json"
+	"os"
+
+	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
+)
+
+type PreGenObj struct {
+	Bucket    string `json:"bucket"`
+	Object    string `json:"object"`
+	Container string `json:"container"`
+}
+
+type PreGenerateInfo struct {
+	Buckets    []string    `json:"buckets"`
+	Containers []string    `json:"containers"`
+	Objects    []PreGenObj `json:"objects"`
+	ObjSize    string      `json:"obj_size"`
+}
+
+// ImportJSONPreGen writes objects from pregenerated JSON file
+// to the registry.
+// Note that ImportJSONPreGen does not check if object already
+// exists in the registry so in case of re-entry the registry
+// will have two entities representing the same object.
+func ImportJSONPreGen(o *registry.ObjRegistry, filename string) error {
+	f, err := os.ReadFile(filename)
+	if err != nil {
+		return err
+	}
+
+	var pregenInfo PreGenerateInfo
+	err = json.Unmarshal(f, &pregenInfo)
+	if err != nil {
+		return err
+	}
+
+	// AddObject uses DB.Batch to combine concurrent Batch calls
+	// into a single Bolt transaction. DB.Batch is limited by
+	// DB.MaxBatchDelay which may affect performance.
+	for _, obj := range pregenInfo.Objects {
+		if obj.Bucket != "" {
+			err = o.AddObject("", "", obj.Bucket, obj.Object, "")
+		} else {
+			err = o.AddObject(obj.Container, obj.Object, "", "", "")
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}

@@ -0,0 +1,27 @@
+package importer
+
+import (
+	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
+	"github.com/spf13/cobra"
+)
+
+// Cmd represents the import command.
+var Cmd = &cobra.Command{
+	Use:   "import",
+	Short: "Import objects into registry",
+	Long:  "Import objects into registry from pregenerated files",
+	Example: `xk6-registry import registry.bolt preset.json
+xk6-registry import registry.bolt preset.json another_preset.json`,
+	RunE: runCmd,
+	Args: cobra.MinimumNArgs(2),
+}
+
+func runCmd(cmd *cobra.Command, args []string) error {
+	objRegistry := registry.NewObjRegistry(cmd.Context(), args[0])
+	for i := 1; i < len(args); i++ {
+		if err := ImportJSONPreGen(objRegistry, args[i]); err != nil {
+			return err
+		}
+	}
+	return nil
+}

cmd/xk6-registry/main.go (new file)
@@ -0,0 +1,18 @@
+package main
+
+import (
+	"context"
+	"os"
+	"os/signal"
+	"syscall"
+)
+
+func main() {
+	ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
+	if cmd, err := rootCmd.ExecuteContextC(ctx); err != nil {
+		cmd.PrintErrln("Error:", err.Error())
+		cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
+		os.Exit(1)
+	}
+}

cmd/xk6-registry/root.go (new file)
@@ -0,0 +1,33 @@
+package main
+
+import (
+	"runtime"
+
+	"git.frostfs.info/TrueCloudLab/xk6-frostfs/cmd/xk6-registry/importer"
+	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/version"
+	"github.com/spf13/cobra"
+)
+
+var rootCmd = &cobra.Command{
+	Use:     "xk6-registry",
+	Version: version.Version,
+	Short:   "Command Line Tool to work with Registry",
+	Long: `Registry provides tools to work with object registry for xk6.
+It contains command for importing objects in registry from preset`,
+	SilenceErrors: true,
+	SilenceUsage:  true,
+	Run:           rootCmdRun,
+}
+
+func init() {
+	cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
+	rootCmd.SetVersionTemplate(`FrostFS xk6-registry
+{{printf "Version: %s" .Version }}
+GoVersion: {{ runtimeVersion }}
+`)
+	rootCmd.AddCommand(importer.Cmd)
+}
+
+func rootCmdRun(cmd *cobra.Command, _ []string) {
+	_ = cmd.Usage()
+}

@@ -1,7 +1,8 @@
 import local from 'k6/x/frostfs/local';
+import datagen from 'k6/x/frostfs/datagen';
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
-const payload = open('../go.sum', 'b');
+const generator = datagen.generator(1024, "random", false);
 const local_cli = local.connect("/path/to/config.yaml", "/path/to/config/dir", "", false)
 export const options = {
@@ -15,6 +16,7 @@ export default function () {
     'unique_header': uuidv4()
   }
   const container_id = '6BVPPXQewRJ6J5EYmAPLczXxNocS7ikyF7amS2esWQnb';
+  const payload = generator.genPayload()
   let resp = local_cli.put(container_id, headers, payload)
   if (resp.success) {
     local_cli.get(container_id, resp.object_id)

@@ -1,19 +1,19 @@
 import native from 'k6/x/frostfs/native';
+import datagen from 'k6/x/frostfs/datagen';
 import { fail } from "k6";
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
-const payload = open('../go.sum', 'b');
-const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false)
+const generator = datagen.generator(1024, "random", false);
+const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false, 0)
 export const options = {
   stages: [
-    {duration: '30s', target: 10},
+    { duration: '30s', target: 10 },
   ],
 };
 export function setup() {
   const params = {
-    acl: 'public-read-write',
     placement_policy: 'REP 3',
     name: 'container-name',
     name_global_scope: 'false'
@@ -24,10 +24,11 @@ export function setup() {
     fail(res.error)
   }
   console.info("created container", res.container_id)
-  return {container_id: res.container_id}
+  return { container_id: res.container_id }
 }
 export default function (data) {
+  const payload = generator.genPayload()
   let headers = {
     'unique_header': uuidv4()
   }

@@ -1,9 +1,11 @@
 import native from 'k6/x/frostfs/native';
+import datagen from 'k6/x/frostfs/datagen';
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
-const payload = open('../go.sum', 'b');
+const generator = datagen.generator(1024, "random", false);
+const payload = generator.genPayload()
 const container = "AjSxSNNXbJUDPqqKYm1VbFVDGCakbpUNH8aGjPmGAH3B"
-const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false)
+const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
 const frostfs_obj = frostfs_cli.onsite(container, payload)
 export const options = {

@@ -1,8 +1,9 @@
 import s3 from 'k6/x/frostfs/s3';
+import datagen from 'k6/x/frostfs/datagen';
 import { fail } from 'k6'
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
-const payload = open('../go.sum', 'b');
+const generator = datagen.generator(1024, "random", false);
 const bucket = "cats"
 const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true'})
@@ -16,7 +17,6 @@ export function setup() {
   const params = {
     acl: 'private',
     lock_enabled: 'true',
-    location_constraint: 'ru'
   }
   const res = s3_cli.createBucket(bucket, params)
@@ -27,6 +27,7 @@ export function setup() {
 export default function () {
   const key = uuidv4();
+  const payload = generator.genPayload()
   if (s3_cli.put(bucket, key, payload).success) {
     s3_cli.get(bucket, key)
   }

@@ -1,14 +1,16 @@
 import s3local from 'k6/x/frostfs/s3local';
+import datagen from 'k6/x/frostfs/datagen';
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
 const bucket = "testbucket"
-const payload = open('../go.sum', 'b');
+const generator = datagen.generator(1024, "random", false);
 const s3local_cli = s3local.connect("path/to/storage/config.yml", "path/to/storage/config/dir", {}, {
   'testbucket': 'GBQDDUM1hdodXmiRHV57EUkFWJzuntsG8BG15wFSwam6',
 });
 export default function () {
   const key = uuidv4();
+  const payload = generator.genPayload()
   if (s3local_cli.put(bucket, key, payload).success) {
     s3local_cli.get(bucket, key)
   }

go.mod
@@ -1,133 +1,141 @@
 module git.frostfs.info/TrueCloudLab/xk6-frostfs
 
-go 1.21
+go 1.22
 
 require (
-	git.frostfs.info/TrueCloudLab/frostfs-node v0.38.3-0.20240502170333-ec2873caa7c6
-	git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.29.0-rc.1.0.20240422122918-034396d554ec
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240502080121-12ddefe07877
+	git.frostfs.info/TrueCloudLab/frostfs-node v0.44.1-0.20250113100501-6c51f48aab69
+	git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.32.0
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76
 	git.frostfs.info/TrueCloudLab/tzhash v1.8.0
-	github.com/aws/aws-sdk-go-v2 v1.19.0
-	github.com/aws/aws-sdk-go-v2/config v1.18.28
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.72
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.37.0
+	github.com/aws/aws-sdk-go-v2 v1.32.8
+	github.com/aws/aws-sdk-go-v2/config v1.28.9
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.47
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2
 	github.com/dop251/goja v0.0.0-20230626124041-ba8a63e79201
 	github.com/go-loremipsum/loremipsum v1.1.3
 	github.com/google/uuid v1.6.0
 	github.com/joho/godotenv v1.5.1
-	github.com/nspcc-dev/neo-go v0.105.1
+	github.com/nspcc-dev/neo-go v0.106.3
 	github.com/panjf2000/ants/v2 v2.9.0
 	github.com/sirupsen/logrus v1.9.3
-	github.com/spf13/cobra v1.8.0
-	github.com/stretchr/testify v1.8.4
-	go.etcd.io/bbolt v1.3.8
+	github.com/spf13/cobra v1.8.1
+	github.com/stretchr/testify v1.9.0
+	go.etcd.io/bbolt v1.3.10
 	go.k6.io/k6 v0.45.1
-	go.uber.org/zap v1.26.0
-	golang.org/x/sys v0.18.0
+	go.uber.org/zap v1.27.0
+	golang.org/x/sys v0.28.0
 )
 
 require (
-	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240427200446-67c6f305b21f // indirect
-	git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240409115729-6eb492025bdd // indirect
+	git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 // indirect
 	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
-	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 // indirect
+	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 // indirect
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
-	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240416071728-04a79f57ef1f // indirect
+	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b // indirect
 	git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
 	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 // indirect
-	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
-	github.com/aws/aws-sdk-go v1.44.296 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.13.27 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.36 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.27 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.30 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.29 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.12.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.19.3 // indirect
-	github.com/aws/smithy-go v1.13.5 // indirect
+	github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
+	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.50 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.33.5 // indirect
+	github.com/aws/smithy-go v1.22.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bluele/gcache v0.0.2 // indirect
-	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
 	github.com/dlclark/regexp2 v1.10.0 // indirect
 	github.com/fatih/color v1.15.0 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/go-chi/chi/v5 v5.0.8 // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-pkgz/expirable-cache/v3 v3.0.0 // indirect
 	github.com/go-sourcemap/sourcemap v2.1.4-0.20211119122758-180fcef48034+incompatible // indirect
 	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
+	github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/hashicorp/golang-lru v1.0.2 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/holiman/uint256 v1.2.4 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/ipfs/go-cid v0.4.1 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/klauspost/compress v1.17.4 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
-	github.com/minio/sio v0.3.1 // indirect
+	github.com/minio/sha256-simd v1.0.1 // indirect
+	github.com/minio/sio v0.3.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
 	github.com/mstoykov/atlas v0.0.0-20220811071828-388f114305dd // indirect
-	github.com/nats-io/nats.go v1.32.0 // indirect
-	github.com/nats-io/nkeys v0.4.7 // indirect
-	github.com/nats-io/nuid v1.0.1 // indirect
-	github.com/nspcc-dev/go-ordered-json v0.0.0-20240112074137-296698a162ae // indirect
-	github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
+	github.com/multiformats/go-base32 v0.1.0 // indirect
+	github.com/multiformats/go-base36 v0.2.0 // indirect
+	github.com/multiformats/go-multiaddr v0.14.0 // indirect
+	github.com/multiformats/go-multibase v0.2.0 // indirect
+	github.com/multiformats/go-multihash v0.2.3 // indirect
+	github.com/multiformats/go-varint v0.0.7 // indirect
+	github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
+	github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
 	github.com/onsi/gomega v1.20.2 // indirect
-	github.com/pelletier/go-toml/v2 v2.1.1 // indirect
-	github.com/pierrec/lz4 v2.6.1+incompatible // indirect
+	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_golang v1.18.0 // indirect
+	github.com/prometheus/client_golang v1.19.0 // indirect
 	github.com/prometheus/client_model v0.5.0 // indirect
-	github.com/prometheus/common v0.46.0 // indirect
+	github.com/prometheus/common v0.48.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
-	github.com/sagikazarmark/locafero v0.4.0 // indirect
+	github.com/sagikazarmark/locafero v0.6.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
+	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/spf13/afero v1.11.0 // indirect
 	github.com/spf13/cast v1.6.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.18.2 // indirect
+	github.com/spf13/viper v1.19.0 // indirect
 	github.com/ssgreg/journald v1.0.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
 	github.com/twmb/murmur3 v1.1.8 // indirect
-	go.opentelemetry.io/otel v1.22.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 // indirect
-	go.opentelemetry.io/otel/metric v1.22.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.22.0 // indirect
-	go.opentelemetry.io/otel/trace v1.22.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
+	go.opentelemetry.io/otel v1.31.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
+	go.opentelemetry.io/otel/metric v1.31.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.31.0 // indirect
+	go.opentelemetry.io/otel/trace v1.31.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.21.0 // indirect
-	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-	golang.org/x/net v0.23.0 // indirect
-	golang.org/x/sync v0.6.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/crypto v0.31.0 // indirect
+	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
+	golang.org/x/net v0.30.0 // indirect
+	golang.org/x/sync v0.10.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
-	google.golang.org/grpc v1.63.2 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
+	google.golang.org/grpc v1.69.2 // indirect
+	google.golang.org/protobuf v1.36.1 // indirect
 	gopkg.in/guregu/null.v3 v3.5.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
+	lukechampine.com/blake3 v1.2.1 // indirect
 )

go.sum
Binary file not shown.

@@ -6,7 +6,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
 )
 var (
@@ -37,16 +36,17 @@ type sizeLimiter struct {
 	currentSize *atomic.Int64
 }
+func (*sizeLimiter) SetEvacuationInProgress(shardID string, value bool) {}
 func (*sizeLimiter) AddMethodDuration(method string, d time.Duration) {}
 func (*sizeLimiter) AddToContainerSize(cnrID string, size int64) {}
 func (*sizeLimiter) AddToObjectCounter(shardID string, objectType string, delta int) {}
 func (*sizeLimiter) ClearErrorCounter(shardID string) {}
 func (*sizeLimiter) DeleteShardMetrics(shardID string) {}
-func (*sizeLimiter) GC() metrics.GCMetrics { return &noopGCMetrics{} }
+func (*sizeLimiter) GC() engine.GCMetrics { return &noopGCMetrics{} }
 func (*sizeLimiter) IncErrorCounter(shardID string) {}
 func (*sizeLimiter) SetMode(shardID string, mode mode.Mode) {}
 func (*sizeLimiter) SetObjectCounter(shardID string, objectType string, v uint64) {}
-func (*sizeLimiter) WriteCache() metrics.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
+func (*sizeLimiter) WriteCache() engine.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
 func (*sizeLimiter) DeleteContainerSize(cnrID string) {}
 func (*sizeLimiter) DeleteContainerCount(cnrID string) {}
 func (*sizeLimiter) SetContainerObjectCounter(_, _, _ string, _ uint64) {}
@@ -67,17 +67,18 @@ func (sl *sizeLimiter) IsFull() bool {
 type noopLimiter struct{}
+func (*noopLimiter) SetEvacuationInProgress(shardID string, value bool) {}
 func (*noopLimiter) AddMethodDuration(method string, d time.Duration) {}
 func (*noopLimiter) AddToContainerSize(cnrID string, size int64) {}
 func (*noopLimiter) AddToObjectCounter(shardID string, objectType string, delta int) {}
 func (*noopLimiter) AddToPayloadCounter(shardID string, size int64) {}
 func (*noopLimiter) ClearErrorCounter(shardID string) {}
 func (*noopLimiter) DeleteShardMetrics(shardID string) {}
-func (*noopLimiter) GC() metrics.GCMetrics { return &noopGCMetrics{} }
+func (*noopLimiter) GC() engine.GCMetrics { return &noopGCMetrics{} }
 func (*noopLimiter) IncErrorCounter(shardID string) {}
 func (*noopLimiter) SetMode(shardID string, mode mode.Mode) {}
 func (*noopLimiter) SetObjectCounter(shardID string, objectType string, v uint64) {}
-func (*noopLimiter) WriteCache() metrics.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
+func (*noopLimiter) WriteCache() engine.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
 func (*noopLimiter) IsFull() bool { return false }
 func (*noopLimiter) DeleteContainerSize(cnrID string) {}
 func (*noopLimiter) DeleteContainerCount(cnrID string) {}
@@ -99,7 +100,7 @@ type noopWriteCacheMetrics struct{}
 func (*noopWriteCacheMetrics) AddMethodDuration(_, _, _, _ string, _ bool, _ time.Duration) {}
 func (*noopWriteCacheMetrics) Close(_, _ string) {}
-func (*noopWriteCacheMetrics) IncOperationCounter(_, _, _, _ string, _ metrics.NullBool) {}
+func (*noopWriteCacheMetrics) IncOperationCounter(_, _, _, _ string, _ engine.NullBool) {}
 func (*noopWriteCacheMetrics) SetActualCount(_, _, _ string, count uint64) {}
 func (*noopWriteCacheMetrics) SetEstimateSize(_, _, _ string, _ uint64) {}
 func (*noopWriteCacheMetrics) SetMode(shardID string, mode string) {}

@@ -30,7 +30,6 @@ import (
 	"go.etcd.io/bbolt"
 	"go.k6.io/k6/js/modules"
 	"go.k6.io/k6/metrics"
-	"go.uber.org/zap"
 	"golang.org/x/sys/unix"
 )
@@ -232,29 +231,30 @@ func (epochState) CurrentEpoch() uint64 { return 0 }
 //
 // Note that the configuration file only needs to contain the storage-specific sections.
 func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug bool, l Limiter) ([]engine.Option, [][]shard.Option, error) {
-	log := zap.L()
+	prm := logger.Prm{}
+	_ = prm.SetDestination("stdout")
 	if debug {
-		var err error
-		log, err = zap.NewDevelopment()
-		if err != nil {
-			return nil, nil, fmt.Errorf("creating development logger: %v", err)
-		}
+		_ = prm.SetLevelString("debug")
 	}
+	lg, err := logger.NewLogger(&prm)
+	if err != nil {
+		return nil, nil, err
+	}
 	ngOpts := []engine.Option{
 		engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
 		engine.WithShardPoolSize(engineconfig.ShardPoolSize(c)),
-		engine.WithLogger(&logger.Logger{Logger: log}),
+		engine.WithLogger(lg),
 		engine.WithMetrics(l),
 	}
 	var shOpts [][]shard.Option
-	err := engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
+	err = engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
 		opts := []shard.Option{
 			shard.WithRefillMetabase(sc.RefillMetabase()),
 			shard.WithMode(sc.Mode()),
-			shard.WithLogger(&logger.Logger{Logger: log}),
+			shard.WithLogger(lg),
 		}
 		// substorages
@@ -273,7 +273,7 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
 					blobovniczatree.WithBlobovniczaShallowDepth(cfg.ShallowDepth()),
 					blobovniczatree.WithBlobovniczaShallowWidth(cfg.ShallowWidth()),
 					blobovniczatree.WithOpenedCacheSize(cfg.OpenedCacheSize()),
-					blobovniczatree.WithLogger(&logger.Logger{Logger: log}),
+					blobovniczatree.WithLogger(lg),
 				),
 				Policy: func(_ *objectSDK.Object, data []byte) bool {
 					return uint64(len(data)) < sc.SmallSizeLimit()
@@ -302,7 +302,7 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
 			blobstor.WithCompressObjects(sc.Compress()),
 			blobstor.WithUncompressableContentTypes(sc.UncompressableContentTypes()),
 			blobstor.WithStorages(substorages),
-			blobstor.WithLogger(&logger.Logger{Logger: log}),
+			blobstor.WithLogger(lg),
 		))
 	}
@@ -313,14 +313,11 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
 		shard.WithWriteCacheOptions(
 			[]writecache.Option{
 				writecache.WithPath(wc.Path()),
-				writecache.WithMaxBatchSize(wc.BoltDB().MaxBatchSize()),
-				writecache.WithMaxBatchDelay(wc.BoltDB().MaxBatchDelay()),
 				writecache.WithMaxObjectSize(wc.MaxObjectSize()),
-				writecache.WithSmallObjectSize(wc.SmallObjectSize()),
 				writecache.WithFlushWorkersCount(wc.WorkerCount()),
 				writecache.WithMaxCacheSize(wc.SizeLimit()),
 				writecache.WithNoSync(wc.NoSync()),
-				writecache.WithLogger(&logger.Logger{Logger: log}),
+				writecache.WithLogger(lg),
 			},
 		),
 	)
@@ -350,7 +347,7 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
 			Timeout: 1 * time.Second,
 		}),
 		metabase.WithEpochState(epochState{}),
-		metabase.WithLogger(&logger.Logger{Logger: log}),
+		metabase.WithLogger(lg),
 	))
 }

@@ -69,7 +69,7 @@ func (c *RawClient) Put(ctx context.Context, containerID cid.ID, ownerID *user.I
 	}
 	var req engine.PutPrm
-	req.WithObject(obj)
+	req.Object = obj
 	start := time.Now()
 	err = c.ng.Put(ctx, req)

@@ -1,6 +1,10 @@
 package logging
 import (
+	"fmt"
+	"strings"
+	"time"
+
 	"github.com/dop251/goja"
 	"github.com/sirupsen/logrus"
 	"go.k6.io/k6/js/modules"
@@ -55,14 +59,29 @@ func (r *RootModule) NewModuleInstance(vu modules.VU) modules.Instance {
 		return &Logging{vu: vu}
 	}
+	tsFormat, disableTs := time.TimeOnly, false
+	if val, ok := vu.InitEnv().LookupEnv("DATE_FORMAT"); ok {
+		switch strings.ToLower(val) {
+		case "timeonly":
+		case "datetime":
+			tsFormat = time.DateTime
+		case "none":
+			disableTs = true
+		default:
+			panic(fmt.Sprintf("invalid value for DATE_FORMAT: %s (should be `timeonly`, `datetime` or `none`)", val))
+		}
+	}
 	format := lg.Formatter
 	switch f := format.(type) {
 	case *logrus.TextFormatter:
 		f.ForceColors = true
 		f.FullTimestamp = true
-		f.TimestampFormat = "15:04:05"
+		f.TimestampFormat = tsFormat
+		f.DisableTimestamp = disableTs
 	case *logrus.JSONFormatter:
-		f.TimestampFormat = "15:04:05"
+		f.TimestampFormat = tsFormat
+		f.DisableTimestamp = disableTs
 	}
 	return &Logging{vu: vu}

internal/native/cache.go (new file)
@@ -0,0 +1,58 @@
+package native
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
+
+const networkCacheTTL = time.Minute
+
+var networkInfoCache = &networkInfoCacheT{}
+
+type networkInfoCacheT struct {
+	guard   sync.RWMutex
+	current *netmap.NetworkInfo
+	fetchTS time.Time
+}
+
+func (c *networkInfoCacheT) getOrFetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
+	if v := c.get(); v != nil {
+		return v, nil
+	}
+	return c.fetch(ctx, cli)
+}
+
+func (c *networkInfoCacheT) get() *netmap.NetworkInfo {
+	c.guard.RLock()
+	defer c.guard.RUnlock()
+
+	if c.current == nil || time.Since(c.fetchTS) > networkCacheTTL {
+		return nil
+	}
+
+	return c.current
+}
+
+func (c *networkInfoCacheT) fetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
+	c.guard.Lock()
+	defer c.guard.Unlock()
+
+	if time.Since(c.fetchTS) <= networkCacheTTL {
+		return c.current, nil
+	}
+
+	res, err := cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
+	if err != nil {
+		return nil, err
+	}
+
+	v := res.Info()
+	c.current = &v
+	c.fetchTS = time.Now()
+
+	return c.current, nil
+}

View file

@@ -13,7 +13,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -35,6 +34,7 @@ type (
 		tok            session.Object
 		cli            *client.Client
 		prepareLocally bool
+		maxObjSize     uint64
 	}

 	PutResponse struct {
@@ -71,6 +71,7 @@ type (
 		hdr            object.Object
 		payload        []byte
 		prepareLocally bool
+		maxObjSize     uint64
 	}
 )
@@ -103,7 +104,7 @@ func (c *Client) Put(containerID string, headers map[string]string, payload data
 	o.SetOwnerID(owner)
 	o.SetAttributes(attrs...)

-	resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize)
+	resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize, c.maxObjSize)
 	if err != nil {
 		return PutResponse{Success: false, Error: err.Error()}
 	}
@@ -263,16 +264,6 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
 	container.SetCreationTime(&cnr, time.Now())
 	cnr.SetOwner(usr)

-	if basicACLStr, ok := params["acl"]; ok {
-		var basicACL acl.Basic
-		err := basicACL.DecodeString(basicACLStr)
-		if err != nil {
-			return c.putCnrErrorResponse(err)
-		}
-		cnr.SetBasicACL(basicACL)
-	}
-
 	placementPolicyStr, ok := params["placement_policy"]
 	if ok {
 		var placementPolicy netmap.PlacementPolicy
@@ -373,6 +364,7 @@ func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObj
 		hdr:            *obj,
 		payload:        data,
 		prepareLocally: c.prepareLocally,
+		maxObjSize:     c.maxObjSize,
 	}
 }
@@ -398,7 +390,7 @@ func (p PreparedObject) Put(headers map[string]string) PutResponse {
 		return PutResponse{Success: false, Error: err.Error()}
 	}

-	_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0)
+	_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0, p.maxObjSize)
 	if err != nil {
 		return PutResponse{Success: false, Error: err.Error()}
 	}
@@ -413,7 +405,7 @@ func (s epochSource) CurrentEpoch() uint64 {
 }

 func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Object,
-	hdr *object.Object, payload datagen.Payload, chunkSize int,
+	hdr *object.Object, payload datagen.Payload, chunkSize int, maxObjSize uint64,
 ) (*client.ResObjectPut, error) {
 	bufSize := defaultBufferSize
 	if chunkSize > 0 {
@@ -434,13 +426,16 @@ func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Ob
 		prm.MaxChunkLength = chunkSize
 	}

 	if prepareLocally {
-		res, err := cli.NetworkInfo(vu.Context(), client.PrmNetworkInfo{})
+		ni, err := networkInfoCache.getOrFetch(vu.Context(), cli)
 		if err != nil {
 			return nil, err
 		}
-		prm.MaxSize = res.Info().MaxObjectSize()
-		prm.EpochSource = epochSource(res.Info().CurrentEpoch())
+		prm.MaxSize = ni.MaxObjectSize()
+		prm.EpochSource = epochSource(ni.CurrentEpoch())
 		prm.WithoutHomomorphHash = true
+		if maxObjSize > 0 {
+			prm.MaxSize = maxObjSize
+		}
 	}

 	objectWriter, err := cli.ObjectPutInit(vu.Context(), prm)
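The hunk above makes put() read network parameters through the cache and lets a positive maxObjSize override the network-wide limit. A hypothetical helper distilling that selection order:

```go
// effectiveMaxSize mirrors the assignments in put(): the network limit is
// the default, and a positive per-client override (validated against the
// network limit in Connect) takes precedence.
func effectiveMaxSize(networkMax, maxObjSize uint64) uint64 {
	if maxObjSize > 0 {
		return maxObjSize
	}
	return networkMax
}
```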

View file

@@ -52,13 +52,17 @@ func (n *Native) Exports() modules.Exports {
 	return modules.Exports{Default: n}
 }

-func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool) (*Client, error) {
+func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool, maxObjSize int) (*Client, error) {
 	var (
 		cli client.Client
 		pk  *keys.PrivateKey
 		err error
 	)

+	if maxObjSize < 0 {
+		return nil, fmt.Errorf("max object size value must be positive")
+	}
+
 	pk, err = keys.NewPrivateKey()
 	if len(hexPrivateKey) != 0 {
 		pk, err = keys.NewPrivateKeyFromHex(hexPrivateKey)
@@ -114,6 +118,21 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
 	tok.SetAuthKey(&key)
 	tok.SetExp(exp)

+	res, err := cli.NetworkInfo(n.vu.Context(), client.PrmNetworkInfo{})
+	if err != nil {
+		return nil, err
+	}
+
+	prevEpoch := res.Info().CurrentEpoch() - 1
+	tok.SetNbf(prevEpoch)
+	tok.SetIat(prevEpoch)
+
+	if prepareLocally && maxObjSize > 0 {
+		if uint64(maxObjSize) > res.Info().MaxObjectSize() {
+			return nil, fmt.Errorf("max object size must be not greater than %d bytes", res.Info().MaxObjectSize())
+		}
+	}
+
 	// register metrics
 	objPutSuccess, _ = stats.Registry.NewMetric("frostfs_obj_put_success", metrics.Counter)
@@ -140,5 +159,6 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
 		tok:            tok,
 		cli:            &cli,
 		prepareLocally: prepareLocally,
+		maxObjSize:     uint64(maxObjSize),
 	}, nil
 }
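Connect now stamps the session token with nbf/iat one epoch behind the queried node, so the token also validates on nodes whose epoch counter lags by one, and it rejects a maxObjSize above the network limit when client-side preparation is on. A hypothetical recap of those two checks:

```go
// validateConnectParams is an illustrative condensation of the checks added
// above: if the queried node reports epoch N, issuing at N-1 keeps the token
// valid on nodes still at N-1; a user-supplied maxObjSize must not exceed
// the network-wide limit when prepareLocally is enabled.
func validateConnectParams(currentEpoch, networkMax uint64, prepareLocally bool, maxObjSize int) (issueEpoch uint64, err error) {
	if maxObjSize < 0 {
		return 0, fmt.Errorf("max object size value must be positive")
	}
	if prepareLocally && maxObjSize > 0 && uint64(maxObjSize) > networkMax {
		return 0, fmt.Errorf("max object size must be not greater than %d bytes", networkMax)
	}
	return currentEpoch - 1, nil
}
```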

View file

@@ -11,6 +11,7 @@ type ObjExporter struct {
 type PreGenerateInfo struct {
 	Buckets    []string  `json:"buckets"`
+	Containers []string  `json:"containers"`
 	Objects    []ObjInfo `json:"objects"`
 	ObjSize    string    `json:"obj_size"`
 }
@@ -18,6 +19,8 @@ type PreGenerateInfo struct {
 type ObjInfo struct {
 	Bucket string `json:"bucket"`
 	Object string `json:"object"`
+	CID    string `json:"cid"`
+	OID    string `json:"oid"`
 }

 func NewObjExporter(selector *ObjSelector) *ObjExporter {
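With the new fields, an exported pregen file carries container IDs alongside buckets and per-object CID/OID pairs. A self-contained sketch with local mirrors of the structs and illustrative values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the registry types above (illustrative only).
type ObjInfo struct {
	Bucket string `json:"bucket"`
	Object string `json:"object"`
	CID    string `json:"cid"`
	OID    string `json:"oid"`
}

type PreGenerateInfo struct {
	Buckets    []string  `json:"buckets"`
	Containers []string  `json:"containers"`
	Objects    []ObjInfo `json:"objects"`
	ObjSize    string    `json:"obj_size"`
}

func main() {
	info := PreGenerateInfo{
		Containers: []string{"example-cid"},
		Objects:    []ObjInfo{{CID: "example-cid", OID: "example-oid"}},
		ObjSize:    "1024",
	}
	out, err := json.MarshalIndent(info, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```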

View file

@ -0,0 +1,156 @@
package registry
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"os"
"path/filepath"
"slices"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
type expectedResult struct {
mode string
objects []ObjectInfo
dir string
dbName string
jsonName string
}
func TestObjectExporter(t *testing.T) {
names := []string{"s3", "grpc"}
for _, name := range names {
t.Run(name, runExportTest)
t.Run(name+"-changed", runExportChangedTest)
t.Run(name+"-empty", runExportEmptyTest)
}
}
func runExportTest(t *testing.T) {
expected := getExpectedResult(t)
objReg := getFilledRegistry(t, expected)
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(expected.objects, expected.jsonName))
}
func runExportChangedTest(t *testing.T) {
expected := getExpectedResult(t)
objReg := getFilledRegistry(t, expected)
newStatus := randString(10)
num := randPositiveInt(1, len(expected.objects))
changedObjects := make([]ObjectInfo, num)
require.Equal(t, num, copy(changedObjects[:], expected.objects[:]))
sel := NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated})
for i := range changedObjects {
changedObjects[i].Status = newStatus
require.NoError(t, objReg.SetObjectStatus(sel.NextObject().Id, statusCreated, newStatus))
}
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: newStatus}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(changedObjects, expected.jsonName))
}
func runExportEmptyTest(t *testing.T) {
expected := getExpectedResult(t)
expected.objects = make([]ObjectInfo, 0)
objReg := getFilledRegistry(t, expected)
objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
require.NoError(t, checkExported(expected.objects, expected.jsonName))
}
func getExpectedResult(t *testing.T) expectedResult {
num := randPositiveInt(2, 100)
mode := getMode(t.Name())
require.NotEqual(t, "", mode, "test mode should contain either \"s3\" or \"grpc\"")
dir := t.TempDir()
res := expectedResult{
mode: mode,
objects: generateObjectInfo(num, t.Name()),
dir: dir,
dbName: filepath.Join(dir, "registry-"+mode+".db"),
jsonName: filepath.Join(dir, "registry-"+mode+".json"),
}
return res
}
func randPositiveInt(min, max int) int {
return rand.Intn(max-min) + min
}
func getMode(name string) (res string) {
if strings.Contains(name, "s3") {
res = filepath.Base(name)
}
if strings.Contains(name, "grpc") {
res = filepath.Base(name)
}
return res
}
func generateObjectInfo(num int, mode string) []ObjectInfo {
res := make([]ObjectInfo, num)
for i := range res {
res[i] = randomObjectInfo()
if !strings.Contains(mode, "s3") {
res[i].S3Bucket = ""
res[i].S3Key = ""
}
if !strings.Contains(mode, "grpc") {
res[i].CID = ""
res[i].OID = ""
}
}
return res
}
func getFilledRegistry(t *testing.T, expected expectedResult) *ObjRegistry {
objReg := NewObjRegistry(context.Background(), expected.dbName)
for i := range expected.objects {
require.NoError(t, objReg.AddObject(expected.objects[i].CID, expected.objects[i].OID, expected.objects[i].S3Bucket, expected.objects[i].S3Key, expected.objects[i].PayloadHash))
}
return objReg
}
func checkExported(expected []ObjectInfo, fileName string) error {
file, err := os.ReadFile(fileName)
if err != nil {
return err
}
if !json.Valid(file) {
return fmt.Errorf("exported json file %s is invalid", fileName)
}
var actual PreGenerateInfo
if err := json.Unmarshal(file, &actual); err != nil {
return err
}
if len(expected) != len(actual.Objects) {
return fmt.Errorf("expected len(): %v, got len(): %v", len(expected), len(actual.Objects))
}
for i := range expected {
if !slices.ContainsFunc(actual.Objects, func(oi ObjInfo) bool {
compareS3 := oi.Bucket == expected[i].S3Bucket && oi.Object == expected[i].S3Key
comparegRPC := oi.CID == expected[i].CID && oi.OID == expected[i].OID
return compareS3 && comparegRPC
}) {
return fmt.Errorf("object %v not found in exported json file %s", expected[i], fileName)
}
}
return nil
}

View file

@@ -101,7 +101,7 @@ func randomObjectInfo() ObjectInfo {
 func randString(n int) string {
 	var sb strings.Builder
 	for i := 0; i < n; i++ {
-		sb.WriteRune('a' + rune(rand.Int())%('z'-'a'+1))
+		sb.WriteRune('a' + rune(rand.Int31())%('z'-'a'+1))
 	}
 	return sb.String()
 }

View file

@@ -3,12 +3,15 @@ package registry
 import (
 	"context"
 	"fmt"
+	"sync"
 	"time"

 	"github.com/nspcc-dev/neo-go/pkg/io"
 	"go.etcd.io/bbolt"
 )

+const nextObjectTimeout = 10 * time.Second
+
 type ObjFilter struct {
 	Status string
 	Age    int
@@ -21,6 +24,8 @@ type ObjSelector struct {
 	filter    *ObjFilter
 	cacheSize int
 	kind      SelectorKind
+	// Sync synchronizes VU used for deletion.
+	Sync sync.WaitGroup
 }

 // objectSelectCache is the default maximum size of a batch to select from DB.
@@ -57,7 +62,16 @@ func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind,
 // - underlying registry context is done, nil objects will be returned on the
 // currently blocked and every further NextObject calls.
 func (o *ObjSelector) NextObject() *ObjectInfo {
+	if o.kind == SelectorOneshot {
 		return <-o.objChan
+	}
+
+	select {
+	case <-time.After(nextObjectTimeout):
+		return nil
+	case obj := <-o.objChan:
+		return obj
+	}
 }

 // Count returns total number of objects that match filter of the selector.
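A minimal consumer sketch of the new behavior (hypothetical helper in the same package): oneshot selectors still block on the channel, while other kinds give up after nextObjectTimeout and hand back nil, so callers must treat nil as "nothing to do".

```go
// drainSelector shows the intended consumption pattern for non-oneshot
// selectors after this change.
func drainSelector(sel *ObjSelector, handle func(*ObjectInfo)) {
	for {
		obj := sel.NextObject()
		if obj == nil {
			return // nextObjectTimeout elapsed or the registry context is done
		}
		handle(obj)
	}
}
```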

View file

@@ -142,6 +142,70 @@ func (c *Client) Get(bucket, key string) GetResponse {
 	return GetResponse{Success: true}
 }

+// DeleteObjectVersion deletes object version with specified versionID.
+// If version argument is empty, deletes all versions and delete-markers of specified object.
+func (c *Client) DeleteObjectVersion(bucket, key, version string) DeleteResponse {
+	var toDelete []types.ObjectIdentifier
+
+	if version != "" {
+		toDelete = append(toDelete, types.ObjectIdentifier{
+			Key:       aws.String(key),
+			VersionId: aws.String(version),
+		})
+	} else {
+		versions, err := c.cli.ListObjectVersions(c.vu.Context(), &s3.ListObjectVersionsInput{
+			Bucket: aws.String(bucket),
+			Prefix: aws.String(key),
+		})
+		if err != nil {
+			stats.Report(c.vu, objDeleteFails, 1)
+			return DeleteResponse{Success: false, Error: err.Error()}
+		}
+		toDelete = filterObjectVersions(versions, key)
+	}
+	if len(toDelete) == 0 {
+		return c.Delete(bucket, key)
+	} else {
+		_, err := c.cli.DeleteObjects(c.vu.Context(), &s3.DeleteObjectsInput{
+			Bucket: aws.String(bucket),
+			Delete: &types.Delete{
+				Objects: toDelete,
+				Quiet:   aws.Bool(true),
+			},
+		})
+		if err != nil {
+			stats.Report(c.vu, objDeleteFails, 1)
+			return DeleteResponse{Success: false, Error: err.Error()}
+		}
+	}
+	return DeleteResponse{Success: true}
+}
+
+func filterObjectVersions(versions *s3.ListObjectVersionsOutput, key string) []types.ObjectIdentifier {
+	var result []types.ObjectIdentifier
+
+	for _, v := range versions.Versions {
+		if *v.Key == key {
+			result = append(result, types.ObjectIdentifier{
+				Key:       v.Key,
+				VersionId: v.VersionId,
+			})
+		}
+	}
+
+	for _, marker := range versions.DeleteMarkers {
+		if *marker.Key == key {
+			result = append(result, types.ObjectIdentifier{
+				Key:       marker.Key,
+				VersionId: marker.VersionId,
+			})
+		}
+	}
+
+	return result
+}
+
 func get(
 	c *s3.Client,
 	bucket string,
@@ -208,13 +272,33 @@ func (c *Client) CreateBucket(bucket string, params map[string]string) CreateBuc
 		Bucket:                     aws.String(bucket),
 		ACL:                        types.BucketCannedACL(params["acl"]),
 		CreateBucketConfiguration:  bucketConfiguration,
-		ObjectLockEnabledForBucket: lockEnabled,
+		ObjectLockEnabledForBucket: aws.Bool(lockEnabled),
 	})
 	if err != nil {
 		stats.Report(c.vu, createBucketFails, 1)
 		return CreateBucketResponse{Success: false, Error: err.Error()}
 	}

+	var versioning bool
+	if strVersioned, ok := params["versioning"]; ok {
+		if versioning, err = strconv.ParseBool(strVersioned); err != nil {
+			stats.Report(c.vu, createBucketFails, 1)
+			return CreateBucketResponse{Success: false, Error: err.Error()}
+		}
+	}
+	if versioning {
+		_, err = c.cli.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
+			Bucket: aws.String(bucket),
+			VersioningConfiguration: &types.VersioningConfiguration{
+				Status: types.BucketVersioningStatusEnabled,
+			},
+		})
+		if err != nil {
+			stats.Report(c.vu, createBucketFails, 1)
+			return CreateBucketResponse{Success: false, Error: err.Error()}
+		}
+	}
+
 	stats.Report(c.vu, createBucketSuccess, 1)
 	stats.Report(c.vu, createBucketDuration, metrics.D(time.Since(start)))
 	return CreateBucketResponse{Success: true}
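Hypothetical module-side usage of the two additions above, assuming the same package as this Client (the "versioning" value goes through strconv.ParseBool, and an empty version ID triggers the list-then-batch-delete path):

```go
// cleanupExample creates a versioned bucket, then removes every version and
// delete-marker of one key in a single batched DeleteObjects call.
func cleanupExample(c *Client) error {
	if resp := c.CreateBucket("bench-bucket", map[string]string{"versioning": "true"}); !resp.Success {
		return errors.New(resp.Error)
	}
	if resp := c.DeleteObjectVersion("bench-bucket", "bench-key", ""); !resp.Success {
		return errors.New(resp.Error)
	}
	return nil
}
```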

View file

@@ -53,13 +53,9 @@ func (s *S3) Exports() modules.Exports {
 }

 func (s *S3) Connect(endpoint string, params map[string]string) (*Client, error) {
-	resolver := aws.EndpointResolverWithOptionsFunc(func(_, _ string, _ ...interface{}) (aws.Endpoint, error) {
-		return aws.Endpoint{
-			URL: endpoint,
-		}, nil
-	})
-
-	cfg, err := config.LoadDefaultConfig(s.vu.Context(), config.WithEndpointResolverWithOptions(resolver))
+	cfg, err := config.LoadDefaultConfig(s.vu.Context(),
+		config.WithBaseEndpoint(endpoint),
+		config.WithSharedConfigProfile(params["aws_profile"]))
 	if err != nil {
 		return nil, fmt.Errorf("configuration error: %w", err)
 	}

View file

@@ -5,6 +5,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
 	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
@@ -15,7 +16,7 @@ import (
 type Client struct {
 	vu       modules.VU
-	l        layer.Client
+	l        *layer.Layer
 	ownerID  *user.ID
 	resolver layer.BucketResolver
 	limiter  local.Limiter
@@ -42,12 +43,14 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
 			Error:   "engine size limit reached",
 		}
 	}

-	cid, err := c.resolver.Resolve(c.vu.Context(), bucket)
+	cid, err := c.resolver.Resolve(c.vu.Context(), v2container.SysAttributeZoneDefault, bucket)
 	if err != nil {
 		stats.Report(c.vu, objPutFails, 1)
 		return PutResponse{Error: err.Error()}
 	}

+	size := uint64(payload.Size())
+
 	prm := &layer.PutObjectParams{
 		BktInfo: &data.BucketInfo{
 			Name: bucket,
@@ -57,7 +60,7 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
 		},
 		Header: map[string]string{},
 		Object: key,
-		Size:   uint64(payload.Size()),
+		Size:   &size,
 		Reader: payload.Reader(),
 	}
@@ -69,14 +72,14 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
 	stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start)))
 	stats.Report(c.vu, objPutSuccess, 1)
-	stats.ReportDataSent(c.vu, float64(prm.Size))
-	stats.Report(c.vu, objPutData, float64(prm.Size))
+	stats.ReportDataSent(c.vu, float64(size))
+	stats.Report(c.vu, objPutData, float64(size))

 	return PutResponse{Success: true}
 }

 func (c *Client) Get(bucket, key string) GetResponse {
-	cid, err := c.resolver.Resolve(c.vu.Context(), bucket)
+	cid, err := c.resolver.Resolve(c.vu.Context(), v2container.SysAttributeZoneDefault, bucket)
 	if err != nil {
 		stats.Report(c.vu, objGetFails, 1)
 		return GetResponse{Error: err.Error()}

View file

@@ -7,11 +7,13 @@ import (
 	"io"
 	"time"

-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	layer "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/relations"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
 )
@@ -33,61 +35,86 @@ func (*frostfs) CreateContainer(context.Context, layer.PrmContainerCreate) (*lay
 	panic(unimplementedMessage("CreateContainer"))
 }

-func (*frostfs) Container(ctx context.Context, prmContainer layer.PrmContainer) (*container.Container, error) {
+func (*frostfs) Container(context.Context, layer.PrmContainer) (*container.Container, error) {
 	panic(unimplementedMessage("Container"))
 }

-func (*frostfs) UserContainers(ctx context.Context, containers layer.PrmUserContainers) ([]cid.ID, error) {
+func (*frostfs) UserContainers(context.Context, layer.PrmUserContainers) ([]cid.ID, error) {
 	panic(unimplementedMessage("UserContainers"))
 }

-func (*frostfs) SetContainerEACL(context.Context, eacl.Table, *session.Container) error {
-	panic(unimplementedMessage("SetContainerEACL"))
-}
-
-func (*frostfs) ContainerEACL(ctx context.Context, containerEACL layer.PrmContainerEACL) (*eacl.Table, error) {
-	panic(unimplementedMessage("ContainerEACL"))
-}
-
 func (*frostfs) DeleteContainer(context.Context, cid.ID, *session.Container) error {
 	panic(unimplementedMessage("DeleteContainer"))
 }

-func (f *frostfs) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*layer.ObjectPart, error) {
+func (f *frostfs) HeadObject(ctx context.Context, prm layer.PrmObjectHead) (*object.Object, error) {
+	return f.Get(ctx, prm.Container, prm.Object)
+}
+
+func (f *frostfs) GetObject(ctx context.Context, prm layer.PrmObjectGet) (*layer.Object, error) {
 	obj, err := f.Get(ctx, prm.Container, prm.Object)
 	if err != nil {
 		return nil, err
 	}
-
-	part := &layer.ObjectPart{}
-	if prm.WithHeader {
-		part.Head = obj
-	}
-
-	if prm.WithPayload {
-		part.Payload = io.NopCloser(bytes.NewReader(obj.Payload()))
-	}
-
-	return part, nil
+	return &layer.Object{
+		Header:  *obj,
+		Payload: io.NopCloser(bytes.NewReader(obj.Payload())),
+	}, nil
 }

-func (f *frostfs) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (oid.ID, error) {
+func (f *frostfs) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (*layer.CreateObjectResult, error) {
 	payload, err := io.ReadAll(prm.Payload)
 	if err != nil {
-		return oid.ID{}, fmt.Errorf("reading payload: %v", err)
+		return nil, fmt.Errorf("reading payload: %v", err)
 	}
 	hdrs := map[string]string{}
 	for _, attr := range prm.Attributes {
 		hdrs[attr[0]] = attr[1]
 	}
-	return f.Put(ctx, prm.Container, nil, hdrs, payload)
+	objID, err := f.Put(ctx, prm.Container, nil, hdrs, payload)
+	if err != nil {
+		return nil, err
+	}
+	return &layer.CreateObjectResult{
+		ObjectID:      objID,
+		CreationEpoch: 0, // probably we should additionally invoke NetworkInfo
+	}, nil
 }

 func (f *frostfs) DeleteObject(context.Context, layer.PrmObjectDelete) error {
 	panic(unimplementedMessage("DeleteObject"))
 }

-func (f *frostfs) TimeToEpoch(ctx context.Context, now time.Time, future time.Time) (uint64, uint64, error) {
+func (f *frostfs) TimeToEpoch(context.Context, time.Time, time.Time) (uint64, uint64, error) {
 	panic(unimplementedMessage("TimeToEpoch"))
 }

-func (f *frostfs) SearchObjects(ctx context.Context, search layer.PrmObjectSearch) ([]oid.ID, error) {
+func (f *frostfs) SearchObjects(context.Context, layer.PrmObjectSearch) ([]oid.ID, error) {
 	panic(unimplementedMessage("SearchObjects"))
 }
+
+func (f *frostfs) AddContainerPolicyChain(context.Context, layer.PrmAddContainerPolicyChain) error {
+	panic(unimplementedMessage("AddContainerPolicyChain"))
+}
+
+func (f *frostfs) RangeObject(context.Context, layer.PrmObjectRange) (io.ReadCloser, error) {
+	panic(unimplementedMessage("RangeObject"))
+}
+
+func (f *frostfs) PatchObject(context.Context, layer.PrmObjectPatch) (oid.ID, error) {
+	panic(unimplementedMessage("PatchObject"))
+}
+
+func (f *frostfs) NetworkInfo(context.Context) (netmap.NetworkInfo, error) {
+	panic(unimplementedMessage("NetworkInfo"))
+}
+
+func (f *frostfs) Relations() relations.Relations {
+	panic(unimplementedMessage("Relations"))
+}
+
+func (f *frostfs) NetmapSnapshot(context.Context) (netmap.NetMap, error) {
+	panic(unimplementedMessage("NetmapSnapshot"))
+}

View file

@@ -1,16 +1,19 @@
 package s3local

 import (
-	"context"
 	"flag"
 	"fmt"
 	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
+	v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
 	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
 	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+	"github.com/panjf2000/ants/v2"
 	"go.k6.io/k6/js/modules"
 	"go.k6.io/k6/metrics"
 	"go.uber.org/zap"
@@ -147,29 +150,66 @@ func (s *Local) Connect(configFile string, configDir string, params map[string]s
 		return nil, fmt.Errorf("creating bucket resolver: %v", err)
 	}

-	cfg := &layer.Config{
-		Cache:       layer.NewCache(layer.DefaultCachesConfigs(zap.L())),
-		AnonKey:     layer.AnonymousKey{Key: key},
-		Resolver:    resolver,
-		TreeService: treeSvc,
+	var owner user.ID
+	user.IDFromKey(&owner, key.PrivateKey.PublicKey)
+
+	workerPool, err := ants.NewPool(100)
+	if err != nil {
+		return nil, fmt.Errorf("init worker pool: %w", err)
 	}

-	l := layer.NewLayer(zap.L(), &frostfs{rc}, cfg)
-	err = l.Initialize(s.l.VU().Context(), nopEventListener{})
+	anonKey, err := keys.NewPrivateKey()
 	if err != nil {
-		return nil, fmt.Errorf("initialize: %w", err)
+		return nil, fmt.Errorf("create anon key: %w", err)
+	}
+
+	cfg := &layer.Config{
+		GateOwner:   owner,
+		Cache:       layer.NewCache(layer.DefaultCachesConfigs(zap.L())),
+		AnonKey:     layer.AnonymousKey{Key: anonKey},
+		Resolver:    resolver,
+		TreeService: treeSvc,
+		Features:    &FeatureSettings{},
+		GateKey:     key,
+		WorkerPool:  workerPool,
 	}

 	return &Client{
 		vu:       s.l.VU(),
-		l:        l,
+		l:        layer.NewLayer(zap.L(), &frostfs{rc}, cfg),
 		ownerID:  rc.OwnerID(),
 		resolver: resolver,
 		limiter:  limiter,
 	}, nil
 }

-type nopEventListener struct{}
-
-func (nopEventListener) Subscribe(context.Context, string, layer.MsgHandler) error { return nil }
-func (nopEventListener) Listen(context.Context) {}
+type FeatureSettings struct {
+}
+
+func (k *FeatureSettings) TombstoneLifetime() uint64 {
+	return 10
+}
+
+func (k *FeatureSettings) TombstoneMembersSize() int {
+	return 100
+}
+
+func (k *FeatureSettings) BufferMaxSizeForPut() uint64 {
+	return 1024 * 1024
+}
+
+func (k *FeatureSettings) ClientCut() bool {
+	return false
+}
+
+func (k *FeatureSettings) MD5Enabled() bool {
+	return false
+}
+
+func (k *FeatureSettings) FormContainerZone(ns string) string {
+	if ns == "" {
+		return v2container.SysAttributeZoneDefault
+	}
+
+	return ns + ".ns"
+}

View file

@@ -24,9 +24,9 @@ func newFixedBucketResolver(bucketMapping map[string]string) (fixedBucketResolve
 	return r, nil
 }

-func (r fixedBucketResolver) Resolve(_ context.Context, bucket string) (cid.ID, error) {
-	if cid, resolved := r[bucket]; resolved {
-		return cid, nil
+func (r fixedBucketResolver) Resolve(_ context.Context, zone, bucket string) (cid.ID, error) {
+	if cnrID, resolved := r[zone+"/"+bucket]; resolved {
+		return cnrID, nil
 	}
-	return cid.ID{}, fmt.Errorf("bucket %s is not mapped to any container", bucket)
+	return cid.ID{}, fmt.Errorf("zone %s and bucket %s is not mapped to any container", zone, bucket)
 }
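After this change the resolver is keyed by "&lt;zone&gt;/&lt;bucket&gt;". A sketch under the assumption that newFixedBucketResolver still parses CID strings from the mapping and that the mapping is keyed exactly as Resolve looks entries up:

```go
// resolveExample registers one zone-prefixed entry and resolves it back;
// the zone value is whatever the gateway passes in (the default zone uses
// v2container.SysAttributeZoneDefault).
func resolveExample(ctx context.Context, zone, bucket, cnrID string) (cid.ID, error) {
	r, err := newFixedBucketResolver(map[string]string{zone + "/" + bucket: cnrID})
	if err != nil {
		return cid.ID{}, err
	}
	return r.Resolve(ctx, zone, bucket)
}
```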

View file

@@ -38,15 +38,15 @@ func (kv kv) GetValue() []byte { return kv.v }

 type nodeResponse struct {
 	meta     []tree.Meta
-	nodeID   uint64
-	parentID uint64
-	ts       uint64
+	nodeID   []uint64
+	parentID []uint64
+	ts       []uint64
 }

 func (r nodeResponse) GetMeta() []tree.Meta { return r.meta }
-func (r nodeResponse) GetNodeID() uint64    { return r.nodeID }
-func (r nodeResponse) GetParentID() uint64  { return r.parentID }
-func (r nodeResponse) GetTimestamp() uint64 { return r.ts }
+func (r nodeResponse) GetNodeID() []uint64    { return r.nodeID }
+func (r nodeResponse) GetParentID() []uint64  { return r.parentID }
+func (r nodeResponse) GetTimestamp() []uint64 { return r.ts }

 func (s treeServiceEngineWrapper) GetNodes(ctx context.Context, p *tree.GetNodesParams) ([]tree.NodeResponse, error) {
 	nodeIDs, err := s.ng.TreeGetByPath(ctx, p.BktInfo.CID, p.TreeID, pilorama.AttributeFilename, p.Path, p.LatestOnly)
@@ -67,9 +67,9 @@ func (s treeServiceEngineWrapper) GetNodes(ctx context.Context, p *tree.GetNodes
 			return nil, err
 		}
 		resp := nodeResponse{
-			parentID: parentID,
-			nodeID:   nodeID,
-			ts:       m.Time,
+			parentID: []uint64{parentID},
+			nodeID:   []uint64{nodeID},
+			ts:       []uint64{m.Time},
 		}
 		if p.AllAttrs {
 			resp.meta = kvToTreeMeta(m.Items)
@@ -89,9 +89,17 @@ func (s treeServiceEngineWrapper) GetNodes(ctx context.Context, p *tree.GetNodes
 	return resps, nil
 }

-func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) ([]tree.NodeResponse, error) {
+func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
 	var resps []tree.NodeResponse

+	if len(rootID) != 1 {
+		return nil, fmt.Errorf("unsupported multiple node ids")
+	}
+
+	if sort {
+		return nil, fmt.Errorf("unsupported sorted subtree")
+	}
+
 	var traverse func(nodeID uint64, curDepth uint32) error
 	traverse = func(nodeID uint64, curDepth uint32) error {
 		m, parentID, err := s.ng.TreeGetMeta(ctx, bktInfo.CID, treeID, nodeID)
@@ -100,9 +108,9 @@ func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.
 		}

 		resps = append(resps, nodeResponse{
-			nodeID:   nodeID,
-			parentID: parentID,
-			ts:       m.Time,
+			nodeID:   []uint64{nodeID},
+			parentID: []uint64{parentID},
+			ts:       []uint64{m.Time},
 			meta:     kvToTreeMeta(m.Items),
 		})
@@ -122,7 +130,7 @@ func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.
 		return nil
 	}

-	if err := traverse(rootID, 0); err != nil {
+	if err := traverse(rootID[0], 0); err != nil {
 		return nil, fmt.Errorf("traversing: %v", err)
 	}
@@ -192,7 +200,7 @@ func (s treeServiceEngineWrapper) RemoveNode(ctx context.Context, bktInfo *data.
 }

 func (s treeServiceEngineWrapper) GetSubTreeStream(
-	ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32,
+	ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32,
 ) (tree.SubTreeStream, error) {
 	panic(unimplementedMessage("TreeService.GetSubTreeStream"))
 }

View file

@@ -1,25 +1,25 @@
-import {sleep} from 'k6';
-import {SharedArray} from 'k6/data';
+import { sleep } from 'k6';
+import { SharedArray } from 'k6/data';
 import exec from 'k6/execution';
 import logging from 'k6/x/frostfs/logging';
 import native from 'k6/x/frostfs/native';
 import registry from 'k6/x/frostfs/registry';
 import stats from 'k6/x/frostfs/stats';

-import {newGenerator} from './libs/datagen.js';
-import {parseEnv} from './libs/env-parser.js';
-import {textSummary} from './libs/k6-summary-0.0.2.js';
-import {uuidv4} from './libs/k6-utils-1.4.0.js';
+import { newGenerator } from './libs/datagen.js';
+import { parseEnv } from './libs/env-parser.js';
+import { textSummary } from './libs/k6-summary-0.0.2.js';
+import { uuidv4 } from './libs/k6-utils-1.4.0.js';

 parseEnv();

 const obj_list = new SharedArray(
     'obj_list',
-    function() { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
+    function () { return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });

 const container_list = new SharedArray(
     'container_list',
-    function() { return JSON.parse(open(__ENV.PREGEN_JSON)).containers; });
+    function () { return JSON.parse(open(__ENV.PREGEN_JSON)).containers; });

 const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
 const summary_json = __ENV.SUMMARY_JSON || '/tmp/summary.json';
@@ -31,8 +31,8 @@ const grpc_endpoint =
 const grpc_client = native.connect(
     grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
     __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
-    __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true'
-                          : false);
+    __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
+    1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));

 const log = logging.new().withField('endpoint', grpc_endpoint);
 const registry_enabled = !!__ENV.REGISTRY_FILE;
@@ -51,8 +51,8 @@ if (registry_enabled) {
   obj_to_read_selector = registry.getLoopedSelector(
       __ENV.REGISTRY_FILE, 'obj_to_read',
       __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
-        status : 'created',
-        age : read_age,
+        status: 'created',
+        age: read_age,
       })
 }
@@ -63,11 +63,11 @@ const write_grpc_chunk_size = 1024 * parseInt(__ENV.GRPC_CHUNK_SIZE || '0')
 const generator = newGenerator(write_vu_count > 0);
 if (write_vu_count > 0) {
   scenarios.write = {
-    executor : 'constant-vus',
-    vus : write_vu_count,
-    duration : `${duration}s`,
-    exec : 'obj_write',
-    gracefulStop : '5s',
+    executor: 'constant-vus',
+    vus: write_vu_count,
+    duration: `${duration}s`,
+    exec: 'obj_write',
+    gracefulStop: '5s',
   };
 }
@@ -83,19 +83,19 @@ if (registry_enabled && delete_age) {
   obj_to_delete_selector =
       constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
                   __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
-                    status : 'created',
-                    age : delete_age,
+                    status: 'created',
+                    age: delete_age,
                   });
 }

 const read_vu_count = parseInt(__ENV.READERS || '0');
 if (read_vu_count > 0) {
   scenarios.read = {
-    executor : 'constant-vus',
-    vus : read_vu_count,
-    duration : `${duration}s`,
-    exec : 'obj_read',
-    gracefulStop : '5s',
+    executor: 'constant-vus',
+    vus: read_vu_count,
+    duration: `${duration}s`,
+    exec: 'obj_read',
+    gracefulStop: '5s',
   };
 }
@@ -107,17 +107,17 @@ if (delete_vu_count > 0) {
   }

   scenarios.delete = {
-    executor : 'constant-vus',
-    vus : delete_vu_count,
-    duration : `${duration}s`,
-    exec : 'obj_delete',
-    gracefulStop : '5s',
+    executor: 'constant-vus',
+    vus: delete_vu_count,
+    duration: `${duration}s`,
+    exec: 'obj_delete',
+    gracefulStop: '5s',
   };
 }

 export const options = {
   scenarios,
-  setupTimeout : '5s',
+  setupTimeout: '5s',
 };

 export function setup() {
@@ -147,8 +147,8 @@ export function teardown(data) {
 export function handleSummary(data) {
   return {
-    'stdout' : textSummary(data, {indent : ' ', enableColors : false}),
-    [summary_json] : JSON.stringify(data),
+    'stdout': textSummary(data, { indent: ' ', enableColors: false }),
+    [summary_json]: JSON.stringify(data),
   };
 }
@@ -157,7 +157,7 @@ export function obj_write() {
     sleep(__ENV.SLEEP_WRITE);
   }

-  const headers = {unique_header : uuidv4()};
+  const headers = { unique_header: uuidv4() };
   const container =
       container_list[Math.floor(Math.random() * container_list.length)];
@@ -186,7 +186,7 @@ export function obj_read() {
   }

   const resp = grpc_client.get(obj.c_id, obj.o_id)
   if (!resp.success) {
-    log.withFields({cid : obj.c_id, oid : obj.o_id}).error(resp.error);
+    log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
   }
   return
 }
@@ -194,7 +194,7 @@ export function obj_read() {
   const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
   const resp = grpc_client.get(obj.container, obj.object)
   if (!resp.success) {
-    log.withFields({cid : obj.container, oid : obj.object}).error(resp.error);
+    log.withFields({ cid: obj.container, oid: obj.object }).error(resp.error);
   }
 }
@@ -214,7 +214,7 @@ export function obj_delete() {
   const resp = grpc_client.delete(obj.c_id, obj.o_id);
   if (!resp.success) {
     // Log errors except (2052 - object already deleted)
-    log.withFields({cid : obj.c_id, oid : obj.o_id}).error(resp.error);
+    log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
     return;
   }

View file

@@ -1,22 +1,22 @@
-import {sleep} from 'k6';
-import {SharedArray} from 'k6/data';
+import { sleep } from 'k6';
+import { SharedArray } from 'k6/data';
 import logging from 'k6/x/frostfs/logging';
 import native from 'k6/x/frostfs/native';
 import registry from 'k6/x/frostfs/registry';
 import stats from 'k6/x/frostfs/stats';

-import {newGenerator} from './libs/datagen.js';
-import {parseEnv} from './libs/env-parser.js';
-import {textSummary} from './libs/k6-summary-0.0.2.js';
-import {uuidv4} from './libs/k6-utils-1.4.0.js';
+import { newGenerator } from './libs/datagen.js';
+import { parseEnv } from './libs/env-parser.js';
+import { textSummary } from './libs/k6-summary-0.0.2.js';
+import { uuidv4 } from './libs/k6-utils-1.4.0.js';

 parseEnv();

-const obj_list = new SharedArray('obj_list', function() {
+const obj_list = new SharedArray('obj_list', function () {
   return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
 });

-const container_list = new SharedArray('container_list', function() {
+const container_list = new SharedArray('container_list', function () {
   return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
 });
@@ -30,8 +30,8 @@ const grpc_endpoint =
 const grpc_client = native.connect(
     grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
     __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
-    __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' :
-    false);
+    __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
+    1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));

 const log = logging.new().withField('endpoint', grpc_endpoint);
 const registry_enabled = !!__ENV.REGISTRY_FILE;
@@ -166,7 +166,7 @@ export function teardown(data) {
 export function handleSummary(data) {
   return {
-    'stdout': textSummary(data, {indent: ' ', enableColors: false}),
+    'stdout': textSummary(data, { indent: ' ', enableColors: false }),
     [summary_json]: JSON.stringify(data),
   };
 }
@@ -176,7 +176,7 @@ export function obj_write() {
     sleep(__ENV.SLEEP_WRITE);
   }

-  const headers = {unique_header: uuidv4()};
+  const headers = { unique_header: uuidv4() };
   const container =
       container_list[Math.floor(Math.random() * container_list.length)];
@@ -205,7 +205,7 @@ export function obj_read() {
   }

   const resp = grpc_client.get(obj.c_id, obj.o_id)
   if (!resp.success) {
-    log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
+    log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
   }
   return
 }
@@ -213,7 +213,7 @@ export function obj_read() {
   const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
   const resp = grpc_client.get(obj.container, obj.object)
   if (!resp.success) {
-    log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
+    log.withFields({ cid: obj.container, oid: obj.object }).error(resp.error);
   }
 }
@@ -230,7 +230,7 @@ export function obj_delete() {
   const resp = grpc_client.delete(obj.c_id, obj.o_id);
   if (!resp.success) {
     // Log errors except (2052 - object already deleted)
-    log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
+    log.withFields({ cid: obj.c_id, oid: obj.o_id }).error(resp.error);
     return;
   }

View file

@@ -1,2 +1,2 @@
(minified vendored bundle scenarios/libs/k6-utils-1.4.0.js: the webpack output was re-wrapped with identical logic; full minified source omitted)
//# sourceMappingURL=index.js.map

scenarios/libs/keygen.js Normal file
View file

@ -0,0 +1,34 @@
import { uuidv4 } from './k6-utils-1.4.0.js';
export function generateS3Key() {
let width = parseInt(__ENV.DIR_WIDTH || '0');
let height = parseInt(__ENV.DIR_HEIGHT || '0');
let key = ''
if (width > 0 && height > 0) {
for (let index = 0; index < height; index++) {
const w = Math.floor(Math.random() * width) + 1;
key = key + 'dir' + w + '/';
}
}
key += objName();
return key;
}
const asciiLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
function objName() {
if (__ENV.OBJ_NAME) {
return __ENV.OBJ_NAME;
}
const length = parseInt(__ENV.OBJ_NAME_LENGTH || '0');
if (length > 0) {
let name = "";
for (let i = 0; i < length; i++) {
name += asciiLetters.charAt(Math.floor(Math.random() * asciiLetters.length));
}
return name;
}
return uuidv4();
}
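For reference: with DIR_WIDTH=4 and DIR_HEIGHT=2, a generated key takes a shape like dir3/dir1/&lt;uuid&gt; (one random directory per level, then a uuid leaf); setting OBJ_NAME pins the leaf name exactly, and OBJ_NAME_LENGTH replaces the uuid with a random alphanumeric name of that length.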

View file

@@ -4,17 +4,18 @@ from helpers.cmd import execute_cmd, log

 def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
+    configuration = ""
     if location:
-        location = f"--create-bucket-configuration 'LocationConstraint={location}'"
+        configuration = f"--create-bucket-configuration 'LocationConstraint={location}'"
     if acl:
         acl = f"--acl {acl}"

     bucket_name = str(uuid.uuid4())
     no_verify_ssl_str = "--no-verify-ssl" if no_verify_ssl else ""
     cmd_line = f"aws {no_verify_ssl_str} s3api create-bucket --bucket {bucket_name} " \
-               f"--endpoint {endpoint} {location} {acl} "
+               f"--endpoint {endpoint} {configuration} {acl} "
     cmd_line_ver = f"aws {no_verify_ssl_str} s3api put-bucket-versioning --bucket {bucket_name} " \
-                   f"--versioning-configuration Status=Enabled --endpoint {endpoint} {acl} "
+                   f"--versioning-configuration Status=Enabled --endpoint {endpoint}"

     output, success = execute_cmd(cmd_line)
@@ -24,7 +25,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
                 f"Error: {output}", endpoint)
         return False

-    if versioning == "True":
+    if versioning:
         output, success = execute_cmd(cmd_line_ver)
         if not success:
             log(f"{cmd_line_ver}\n"
@@ -33,7 +34,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
         else:
             log(f"Bucket versioning has been applied for bucket {bucket_name}", endpoint)

-    log(f"Created bucket: {bucket_name}", endpoint)
+    log(f"Created bucket: {bucket_name} ({location})", endpoint)

     return bucket_name
View file

@@ -1,18 +1,16 @@
 import re

 from helpers.cmd import execute_cmd, log

-def create_container(endpoint, policy, wallet_path, config, acl, local=False, depth=0):
-    if depth > 20:
+def create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local=False, retry=0):
+    if retry > int(container_creation_retry):
         raise ValueError(f"unable to create container: too many unsuccessful attempts")

     if wallet_path:
         wallet_file = f"--wallet {wallet_path}"
     if config:
         wallet_config = f"--config {config}"
-    if acl:
-        acl_param = f"--basic-acl {acl}"
     cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} container create {wallet_file} {wallet_config} " \
-               f" --policy '{policy}' {acl_param} --await"
+               f" --policy '{policy}' --await"

     output, success = execute_cmd(cmd_line)
@@ -34,7 +32,21 @@ def create_container(endpoint, policy, wallet_path, config, acl, local=False, de
         raise ValueError(f"no CID was parsed from command output:\t{fst_str}")

     cid = splitted[1]
-    log(f"Created container {cid}", endpoint)
+    log(f"Created container: {cid} ({policy})", endpoint)
+
+    # Add rule for container
+    if rules:
+        r = ""
+        for rule in rules:
+            r += f" --rule '{rule}' "
+        cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} ape-manager add {wallet_file} {wallet_config} " \
+                   f" --chain-id 'chain-id' {r} --target-name '{cid}' --target-type 'container'"
+        output, success = execute_cmd(cmd_line)
+        if not success:
+            log(f"{cmd_line}\n"
+                f"Rule has not been added\n"
+                f"{output}", endpoint)
+            return False

     if not local:
         return cid
@@ -88,7 +100,7 @@ def create_container(endpoint, policy, wallet_path, config, acl, local=False, de
         return cid

     log(f"Created container {cid} is not stored on {endpoint}, creating another one...", endpoint)
-    return create_container(endpoint, policy, wallet_path, config, acl, local, depth + 1)
+    return create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local, retry + 1)

 def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config):
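Besides replacing the hardcoded recursion limit of 20 with the configurable `--retry` bound, this change swaps basic ACLs for APE rules. The rule-attachment step is plain string assembly: one `--rule` clause per entry, then a single `ape-manager add` invocation targeting the new container. A standalone sketch of that assembly (wallet flags left as placeholders; `'chain-id'` is the literal used above):

```python
def build_ape_add_cmd(endpoint, cid, rules, wallet_file="", wallet_config=""):
    # One --rule clause per entry, mirroring the loop in create_container.
    r = "".join(f" --rule '{rule}' " for rule in rules)
    return (f"frostfs-cli --rpc-endpoint {endpoint} ape-manager add {wallet_file} {wallet_config} "
            f" --chain-id 'chain-id' {r} --target-name '{cid}' --target-type 'container'")

print(build_ape_add_cmd("node1:8080", "EXAMPLE_CID", ["allow Object.* *"]))
```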


@@ -15,18 +15,21 @@ from helpers.frostfs_cli import create_container, upload_object
 ERROR_WRONG_CONTAINERS_COUNT = 1
 ERROR_WRONG_OBJECTS_COUNT = 2
 MAX_WORKERS = 50
+DEFAULT_POLICY = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
+DEFAULT_RULES = ["allow Object.* *"]

 parser = argparse.ArgumentParser()
 parser.add_argument('--size', help='Upload objects size in kb')
 parser.add_argument('--containers', help='Number of containers to create')
+parser.add_argument('--retry', default=20, help='Maximum number of retries to create a container')
 parser.add_argument('--out', help='JSON file with output')
 parser.add_argument('--preload_obj', help='Number of pre-loaded objects')
 parser.add_argument('--wallet', help='Wallet file path')
 parser.add_argument('--config', help='Wallet config file path')
 parser.add_argument(
     "--policy",
-    help="Container placement policy",
-    default="REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
+    help=f"Container placement policy. Default is {DEFAULT_POLICY}",
+    action="append"
 )
 parser.add_argument('--endpoint', help='Nodes addresses separated by comma.')
 parser.add_argument('--update', help='Save existed containers')
@@ -35,7 +38,10 @@ parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Def
 parser.add_argument('--sleep', help='Time to sleep between containers creation and objects upload (in seconds), '
                               'Default = 8', default=8)
 parser.add_argument('--local', help='Create containers that store data on provided endpoints. Warning: additional empty containers may be created.', action='store_true')
-parser.add_argument('--acl', help='Container ACL. Default is public-read-write.', default='public-read-write')
+parser.add_argument(
+    '--rule',
+    help='Rule attached to created containers. All entries of CONTAINER_ID will be replaced with id of created container.',
+    action="append")

 args: Namespace = parser.parse_args()
 print(args)
@@ -46,11 +52,17 @@ def main():
     objects_list = []

     endpoints = args.endpoint.split(',')
+    if not args.policy:
+        args.policy = [DEFAULT_POLICY]
+    container_creation_retry = args.retry
     wallet = args.wallet
     wallet_config = args.config
     workers = int(args.workers)
     objects_per_container = int(args.preload_obj)
+    rules = args.rule
+    if not rules:
+        rules = DEFAULT_RULES

     ignore_errors = args.ignore_errors
     if args.update:
@@ -63,9 +75,9 @@ def main():
     containers_count = int(args.containers)
     print(f"Create containers: {containers_count}")
     with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
-        containers_runs = [executor.submit(create_container, endpoint, args.policy, wallet, wallet_config, args.acl, args.local)
-                           for _, endpoint in
-                           zip(range(containers_count), cycle(endpoints))]
+        containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, rules, args.local)
+                           for _, endpoint, policy in
+                           zip(range(containers_count), cycle(endpoints), cycle(args.policy))]

     for run in containers_runs:
         container_id = run.result()
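Making `--policy` appendable and cycling over it alongside the endpoints spreads containers round-robin across both lists. A small illustration of the `zip`/`cycle` idiom with made-up values:

```python
from itertools import cycle

endpoints = ["node1:8080", "node2:8080"]                      # example values
policies = ["REP 1", "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"]

# zip() stops after the container count; cycle() repeats the shorter lists.
for _, endpoint, policy in zip(range(5), cycle(endpoints), cycle(policies)):
    print(endpoint, "->", policy)
```

Note that two equal-length lists always pair the same endpoint with the same policy; lists of co-prime lengths cycle through all combinations.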


@@ -11,6 +11,12 @@ from concurrent.futures import ProcessPoolExecutor
 from helpers.cmd import random_payload
 from helpers.aws_cli import create_bucket, upload_object

+ERROR_WRONG_CONTAINERS_COUNT = 1
+ERROR_WRONG_OBJECTS_COUNT = 2
+ERROR_WRONG_PERCENTAGE = 3
+MAX_WORKERS = 50
+DEFAULT_LOCATION = ""

 parser = argparse.ArgumentParser()
 parser.add_argument('--size', help='Upload objects size in kb.')
@@ -20,8 +26,9 @@ parser.add_argument('--preload_obj', help='Number of pre-loaded objects.')
 parser.add_argument('--endpoint', help='S3 Gateways addresses separated by comma.')
 parser.add_argument('--update', help='True/False, False by default. Save existed buckets from target file (--out). '
                               'New buckets will not be created.')
-parser.add_argument('--location', help='AWS location. Will be empty, if has not be declared.', default="")
-parser.add_argument('--versioning', help='True/False, False by default.')
+parser.add_argument('--location', help=f'AWS location constraint. Default is "{DEFAULT_LOCATION}"', action="append")
+parser.add_argument('--versioning', help='True/False, False by default. Alias of --buckets_versioned=100')
+parser.add_argument('--buckets_versioned', help='Percent of versioned buckets. Default is 0', default=0)
 parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
 parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
 parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
@@ -32,10 +39,6 @@ parser.add_argument('--acl', help='Bucket ACL. Default is private. Expected valu
 args = parser.parse_args()
 print(args)

-ERROR_WRONG_CONTAINERS_COUNT = 1
-ERROR_WRONG_OBJECTS_COUNT = 2
-MAX_WORKERS = 50

 def main():
     buckets = []
     objects_list = []
@@ -43,6 +46,8 @@ def main():
     no_verify_ssl = args.no_verify_ssl
     endpoints = args.endpoint.split(',')
+    if not args.location:
+        args.location = [DEFAULT_LOCATION]

     workers = int(args.workers)
     objects_per_bucket = int(args.preload_obj)
@@ -59,9 +64,18 @@ def main():
     print(f"Create buckets: {buckets_count}")

     with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
-        buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning, args.location, args.acl, no_verify_ssl)
-                        for _, endpoint in
-                        zip(range(buckets_count), cycle(endpoints))]
+        if not 0 <= int(args.buckets_versioned) <= 100:
+            print(f"Percent of versioned buckets must be between 0 and 100: got {args.buckets_versioned}")
+            if not ignore_errors:
+                sys.exit(ERROR_WRONG_PERCENTAGE)
+        if args.versioning == "True":
+            versioning_per_bucket = [True] * buckets_count
+        else:
+            num_versioned_buckets = int((int(args.buckets_versioned) / 100) * buckets_count)
+            versioning_per_bucket = [True] * num_versioned_buckets + [False] * (buckets_count - num_versioned_buckets)
+        buckets_runs = [executor.submit(create_bucket, endpoint, versioning_per_bucket[i], location, args.acl, no_verify_ssl)
+                        for i, endpoint, location in
+                        zip(range(buckets_count), cycle(endpoints), cycle(args.location))]

     for run in buckets_runs:
         bucket_name = run.result()
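The versioning split is computed once, up front: `--versioning True` keeps its old meaning as an alias for 100%, otherwise `--buckets_versioned` decides how many of the first buckets are created versioned. The arithmetic in isolation:

```python
def versioning_flags(buckets_count, percent, versioning_all=False):
    # --versioning True behaves like --buckets_versioned=100.
    if versioning_all:
        return [True] * buckets_count
    num_versioned = int((percent / 100) * buckets_count)
    return [True] * num_versioned + [False] * (buckets_count - num_versioned)

assert versioning_flags(10, 30) == [True] * 3 + [False] * 7
assert versioning_flags(3, 50) == [True, False, False]  # int() truncates downward
```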


@@ -21,6 +21,7 @@ Scenarios `grpc.js`, `local.js`, `http.js` and `s3.js` support the following opt
 * `PAYLOAD_TYPE` - type of an object payload ("random" or "text", default: "random").
 * `STREAMING` - if set, the payload is generated on the fly and is not read into memory fully.
 * `METRIC_TAGS` - custom metrics tags (format `tag1:value1;tag2:value2`).
+* `DATE_FORMAT` - custom datetime format: `timeonly` (default), `datetime` or `none`.

 Additionally, the profiling extension can be enabled to generate CPU and memory profiles which can be inspected with `go tool pprof file.prof`:
 ```shell
@@ -125,7 +126,7 @@ The tests will use all pre-created buckets for PUT operations and all pre-create
 $ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 500 --location load-1-4
 ```
 * '--location' - specify the name of container policy (from policy.json file). It's important to run 'aws configure' each time when the policy file has been changed to pick up the latest policies.
+* '--buckets_versioned' - specify the percentage of versioned buckets from the total number of created buckets. Default is 0

 3. Execute scenario with options:
 ```shell
@@ -138,6 +139,8 @@ Options (in addition to the common options):
 * `DELETE_AGE` - age of object in seconds before which it can not be deleted. This parameter can be used to control how many objects we have in the system under load.
 * `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations.
 * `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
+* `OBJ_NAME_LENGTH` - if specified, then name of the object will be generated with the specified length of ASCII characters.
+* `DIR_HEIGHT`, `DIR_WIDTH` - if both specified, object name will consist of `DIR_HEIGHT` directories, each of which can have `DIR_WIDTH` subdirectories, for example for `DIR_HEIGHT = 3, DIR_WIDTH = 100`, object names will be `/dir{1...100}/dir{1...100}/dir{1...100}/{uuid || OBJ_NAME}`

 ## S3 Multipart
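For clarity, here is a rough Python rendering of the naming rules those new options describe; the real generator is the `generateS3Key` helper in `./libs/keygen.js`, and the exact precedence between `OBJ_NAME`, `OBJ_NAME_LENGTH` and the directory options is an assumption here:

```python
import random
import uuid

def generate_key(obj_name=None, name_length=None, dir_height=0, dir_width=0):
    # Leaf name: OBJ_NAME if set, else a fixed-length ASCII name, else a uuid.
    if obj_name:
        leaf = obj_name
    elif name_length:
        leaf = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(name_length))
    else:
        leaf = str(uuid.uuid4())
    # DIR_HEIGHT levels, each picking one of DIR_WIDTH subdirectories.
    dirs = "".join(f"/dir{random.randint(1, dir_width)}" for _ in range(dir_height))
    return f"{dirs}/{leaf}" if dirs else leaf

print(generate_key(dir_height=3, dir_width=100))  # e.g. /dir42/dir7/dir99/<uuid>
```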


@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';

-import {newGenerator} from './libs/datagen.js';
+import {generateS3Key} from './libs/keygen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
-import {uuidv4} from './libs/k6-utils-1.4.0.js';
+import {newGenerator} from './libs/datagen.js';

 parseEnv();
@@ -132,6 +132,10 @@ export function setup() {
   const start_timestamp = Date.now()
   console.log(
       `Load started at: ${Date(start_timestamp).toString()}`)
+
+  if (delete_vu_count > 0){
+    obj_to_delete_selector.sync.add(delete_vu_count)
+  }
 }

 export function teardown(data) {
@@ -155,7 +159,7 @@ export function obj_write() {
     sleep(__ENV.SLEEP_WRITE);
   }

-  const key = __ENV.OBJ_NAME || uuidv4();
+  const key = generateS3Key();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

   const payload = generator.genPayload();
@@ -204,6 +208,8 @@ export function obj_delete() {
   const obj = obj_to_delete_selector.nextObject();
   if (!obj) {
     if (obj_to_delete_exit_on_null) {
+      obj_to_delete_selector.sync.done()
+      obj_to_delete_selector.sync.wait()
       exec.test.abort("No more objects to select");
     }
     return;
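The new `sync.add`/`sync.done`/`sync.wait` calls read like a wait-group: `setup()` registers the number of delete VUs, and each deleter that runs out of objects checks in and waits for its peers before the test is aborted, so no VU aborts the run while another is still draining. A rough Python analogy of that counter (an assumption about the xk6 selector's semantics, not its actual API):

```python
import threading

class SyncGroup:
    """Wait-group-style counter: wait() unblocks once every add() is matched by done()."""
    def __init__(self):
        self._cond = threading.Condition()
        self._count = 0

    def add(self, n):
        with self._cond:
            self._count += n

    def done(self):
        with self._cond:
            self._count -= 1
            self._cond.notify_all()

    def wait(self):
        with self._cond:
            self._cond.wait_for(lambda: self._count <= 0)
```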


@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';

+import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
-import {uuidv4} from './libs/k6-utils-1.4.0.js';

 parseEnv();
@@ -177,7 +177,7 @@ export function obj_write() {
     sleep(__ENV.SLEEP_WRITE);
   }

-  const key = __ENV.OBJ_NAME || uuidv4();
+  const key = generateS3Key();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

   const payload = generator.genPayload();


@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';

+import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
-import {uuidv4} from './libs/k6-utils-1.4.0.js';

 parseEnv();
@@ -71,11 +71,23 @@
   };
 }

+const read_vu_count = parseInt(__ENV.READERS || '0');
+if (read_vu_count > 0) {
+  scenarios.read = {
+    executor : 'constant-vus',
+    vus : read_vu_count,
+    duration : `${duration}s`,
+    exec : 'obj_read',
+    gracefulStop : '5s',
+  };
+}
+
 const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
 let obj_to_delete_selector = undefined;
 let obj_to_delete_exit_on_null = undefined;
-if (registry_enabled && delete_age) {
-  obj_to_delete_exit_on_null = write_vu_count == 0;
+
+if (registry_enabled ) {
+  obj_to_delete_exit_on_null = (write_vu_count == 0) && (read_vu_count == 0)
+
   let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
                                                : registry.getSelector;
@@ -88,16 +100,7 @@
   });
 }

-const read_vu_count = parseInt(__ENV.READERS || '0');
-if (read_vu_count > 0) {
-  scenarios.read = {
-    executor : 'constant-vus',
-    vus : read_vu_count,
-    duration : `${duration}s`,
-    exec : 'obj_read',
-    gracefulStop : '5s',
-  };
-}

 const delete_vu_count = parseInt(__ENV.DELETERS || '0');
 if (delete_vu_count > 0) {
@@ -156,7 +159,7 @@ export function obj_write() {
     sleep(__ENV.SLEEP_WRITE);
   }

-  const key = __ENV.OBJ_NAME || uuidv4();
+  const key = generateS3Key();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

   const payload = generator.genPayload();


@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';

+import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
-import {uuidv4} from './libs/k6-utils-1.4.0.js';

 parseEnv();
@@ -101,7 +101,7 @@ export function obj_write_multipart() {
     sleep(__ENV.SLEEP_WRITE);
   }

-  const key = __ENV.OBJ_NAME || uuidv4();
+  const key = generateS3Key();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

   const payload = generator.genPayload();


@@ -5,6 +5,7 @@ import registry from 'k6/x/frostfs/registry';
 import s3local from 'k6/x/frostfs/s3local';
 import stats from 'k6/x/frostfs/stats';

+import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
@@ -131,7 +132,7 @@ export function handleSummary(data) {
 }

 export function obj_write() {
-  const key = __ENV.OBJ_NAME || uuidv4();
+  const key = generateS3Key();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

   const payload = generator.genPayload();


@@ -1,13 +1,13 @@
-import {sleep} from 'k6';
-import {Counter} from 'k6/metrics';
+import { sleep } from 'k6';
+import { Counter } from 'k6/metrics';
 import logging from 'k6/x/frostfs/logging';
 import native from 'k6/x/frostfs/native';
 import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';

-import {parseEnv} from './libs/env-parser.js';
-import {textSummary} from './libs/k6-summary-0.0.2.js';
+import { parseEnv } from './libs/env-parser.js';
+import { textSummary } from './libs/k6-summary-0.0.2.js';

 parseEnv();
@@ -44,16 +44,15 @@ if (__ENV.GRPC_ENDPOINTS) {
   grpc_client = native.connect(
       grpcEndpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 0,
       __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 0,
-      __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' :
-      false,
-      '');
+      __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
+      1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
 }

 // Connect to random S3 endpoint
 let s3_client = undefined;
 if (__ENV.S3_ENDPOINTS) {
   const no_verify_ssl = __ENV.NO_VERIFY_SSL || 'true';
-  const connection_args = {no_verify_ssl: no_verify_ssl};
+  const connection_args = { no_verify_ssl: no_verify_ssl };
   const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
   const s3_endpoint =
       s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
@@ -68,7 +67,7 @@ const obj_to_verify_selector = registry.getSelector(
     __ENV.REGISTRY_FILE, 'obj_to_verify',
     __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
       status: 'created',
     });
 const obj_to_verify_count = obj_to_verify_selector.count();
 // Execute at least one iteration (executor shared-iterations can't run 0
 // iterations)
@@ -98,14 +97,14 @@ export function setup() {
   for (const [status, counter] of Object.entries(obj_counters)) {
     const obj_selector = registry.getSelector(
         __ENV.REGISTRY_FILE, status,
-        __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {status});
+        __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, { status });
     counter.add(obj_selector.count());
   }
 }

 export function handleSummary(data) {
   return {
-    'stdout': textSummary(data, {indent: ' ', enableColors: false}),
+    'stdout': textSummary(data, { indent: ' ', enableColors: false }),
     [summary_json]: JSON.stringify(data),
   };
 }
@@ -138,10 +137,10 @@ function verify_object_with_retries(obj, attempts) {
   // ReferenceError: Cannot access a variable before initialization.
   let lg = log;
   if (obj.c_id && obj.o_id) {
-    lg = lg.withFields({cid: obj.c_id, oid: obj.o_id});
+    lg = lg.withFields({ cid: obj.c_id, oid: obj.o_id });
     result = grpc_client.verifyHash(obj.c_id, obj.o_id, obj.payload_hash);
   } else if (obj.s3_bucket && obj.s3_key) {
-    lg = lg.withFields({bucket: obj.s3_bucket, key: obj.s3_key});
+    lg = lg.withFields({ bucket: obj.s3_bucket, key: obj.s3_key });
     result =
         s3_client.verifyHash(obj.s3_bucket, obj.s3_key, obj.payload_hash);
   } else {