forked from TrueCloudLab/xk6-frostfs
Compare commits: feauture-a...master
38 commits
SHA1
ebbc5bc0a7
ddd86d1f23
f4b43b264f
8f9c02253c
829777bd53
3ebb3dda0a
3c6023ca29
a326fbcbf8
14f26e47dc
296971e57c
76fd5c9706
f0cbf9c301
124397578d
a7079cda60
d3d5a1baed
72d24b04a3
f5df03c718
1c7a3b3b6c
e0cbc3b763
54f99dac1d
591f8af161
c2b8944af6
a47bf149d8
bcbd0db25f
17bbbe53e6
bede693470
f539da7d89
6d3ecb6528
75f670b392
9b9db46a07
335c45c578
e7d4dd404a
0a9aeab47c
3bc1229062
e92ce668a8
6d1e7eb49e
f90a645594
3f67606f02
52 changed files with 1101 additions and 405 deletions
@@ -1 +0,0 @@
-* @TrueCloudLab/storage-core @TrueCloudLab/storage-services
@@ -13,7 +13,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.21'
+          go-version: '1.22'

       - name: Run commit format checker
         uses: https://git.frostfs.info/TrueCloudLab/dco-go@v2
@@ -11,20 +11,21 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.22'
+          go-version: '1.23'
           cache: true

-      - name: golangci-lint
-        uses: https://github.com/golangci/golangci-lint-action@v3
-        with:
-          version: latest
+      - name: Install linters
+        run: make lint-install

       - name: Run linters
         run: make lint

   tests:
     name: Tests
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.21', '1.22' ]
+        go_versions: [ '1.22', '1.23' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3

@@ -47,7 +48,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.21'
+          go-version: '1.22'
           cache: true

       - name: Run tests
1 .gitattributes vendored Normal file

@@ -0,0 +1 @@
+/go.sum -diff
3 CODEOWNERS Normal file

@@ -0,0 +1,3 @@
+.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers @TrueCloudLab/storage-services-committers @TrueCloudLab/storage-services-developers
+.forgejo/.* @potyarkin
+Makefile @potyarkin
@@ -3,8 +3,8 @@
 First, thank you for contributing! We love and encourage pull requests from
 everyone. Please follow the guidelines:

-- Check the open [issues](https://github.com/TrueCloudLab/xk6-frostfs/issues) and
-  [pull requests](https://github.com/TrueCloudLab/xk6-frostfs/pulls) for existing
+- Check the open [issues](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/issues) and
+  [pull requests](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/pulls) for existing
   discussions.

 - Open an issue first, to discuss a new feature or enhancement.

@@ -27,19 +27,20 @@ Start by forking the `xk6-frostfs` repository, make changes in a branch and then
 send a pull request. We encourage pull requests to discuss code changes. Here
 are the steps in details:

-### Set up your GitHub Repository
-Fork [xk6-frostfs upstream](https://github.com/TrueCloudLab/xk6-frostfs/fork) source
+### Set up your repository
+
+Fork [xk6-frostfs upstream](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/fork) source
 repository to your own personal repository. Copy the URL of your fork (you will
 need it for the `git clone` command below).

 ```sh
-$ git clone https://github.com/TrueCloudLab/xk6-frostfs
+$ git clone https://git.frostfs.info/TrueCloudLab/xk6-frostfs
 ```

 ### Set up git remote as ``upstream``
 ```sh
 $ cd xk6-frostfs
-$ git remote add upstream https://github.com/TrueCloudLab/xk6-frostfs
+$ git remote add upstream https://git.frostfs.info/TrueCloudLab/xk6-frostfs
 $ git fetch upstream
 $ git merge upstream/master
 ...

@@ -89,7 +90,7 @@ $ git push origin feature/123-something_awesome
 ```

 ### Create a Pull Request
-Pull requests can be created via GitHub. Refer to [this
+Pull requests can be created via git.frostfs.info. Refer to [this
 document](https://help.github.com/articles/creating-a-pull-request/) for
 detailed steps on how to create a pull request. After a Pull Request gets peer
 reviewed and approved, it will be merged.
26 Makefile

@@ -3,10 +3,15 @@
 # Common variables
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-GO_VERSION ?= 1.19
-LINT_VERSION ?= 1.49.0
+GO_VERSION ?= 1.22
+LINT_VERSION ?= 1.60.3
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
 BINDIR = bin

+OUTPUT_LINT_DIR ?= $(abspath $(BINDIR))/linters
+LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
+TMP_DIR := .cache
+
 # Binaries to build
 CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
 BINS = $(addprefix $(BINDIR)/, $(CMDS))

@@ -64,7 +69,22 @@ format:

 # Run linters
 lint:
-	@golangci-lint --timeout=5m run
+	@if [ ! -d "$(LINT_DIR)" ]; then \
+		make lint-install; \
+	fi
+	$(LINT_DIR)/golangci-lint run --timeout=5m
+
+# Install linters
+lint-install:
+	@rm -rf $(OUTPUT_LINT_DIR)
+	@mkdir -p $(OUTPUT_LINT_DIR)
+	@mkdir -p $(TMP_DIR)
+	@rm -rf $(TMP_DIR)/linters
+	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
+	@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
+	@rm -rf $(TMP_DIR)/linters
+	@rmdir $(TMP_DIR) 2>/dev/null || true
+	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)

 # Run linters in Docker
 docker/lint:
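The rewritten `lint` target is self-bootstrapping: when the pinned linter directory is missing it runs `lint-install` first. A plausible invocation sequence (a sketch; the paths follow the Makefile variables above):

```sh
$ make lint-install   # one-time: builds the TrueCloudLab linters and installs the pinned golangci-lint under bin/linters
$ make lint           # runs the pinned golangci-lint; re-installs it automatically if bin/linters is absent
```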
29 README.md

@@ -1,5 +1,5 @@
 <p align="center">
-<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
+<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
 </p>
 <p align="center">
   <a href="https://go.k6.io/k6">k6</a> extension to test and benchmark FrostFS related protocols.

@@ -48,15 +48,16 @@ Create native client with `connect` method. Arguments:
 - dial timeout in seconds (0 for the default value)
 - stream timeout in seconds (0 for the default value)
 - generate object header on the client side (for big object - split locally too)
+- max size for generated object header on the client side (for big object - the size that the object is splitted into)

 ```js
 import native from 'k6/x/frostfs/native';
-const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false)
+const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
 ```

 ### Methods
 - `putContainer(params)`. The `params` is a dictionary (e.g.
-  `{acl:'public-read-write',placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
+  `{placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
   Returns dictionary with `success`
   boolean flag, `container_id` string, and `error` string.
 - `setBufferSize(size)`. Sets internal buffer size for data upload and
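A minimal sketch of the updated `putContainer` call, reusing the README's own endpoint and parameter values (note the `acl` key is gone after this change):

```js
import native from 'k6/x/frostfs/native';

const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
const res = frostfs_cli.putContainer({placement_policy: 'REP 3', name: 'container-name', name_global_scope: 'false'})
if (res.success) {
    console.log(res.container_id)   // container_id is returned on success per the description above
}
```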
@@ -106,11 +107,12 @@ const s3_cli = s3.connect("https://s3.frostfs.devenv:8080")
 You can also provide additional options:
 ```js
 import s3 from 'k6/x/frostfs/s3';
-const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s'})
+const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s', 'aws_profile': 'metal'})
 ```

 * `no_verify_ss` - Bool. If `true` - skip verifying the s3 certificate chain and host name (useful if s3 uses self-signed certificates)
 * `timeout` - Duration. Set timeout for requests (in http client). If omitted or zero - timeout is infinite.
+* `aws_profile` - String. Use custom profile credentials from `$HOME/.aws/credentials` file. If omitted or empty - use default profile.

 ### Methods
 - `createBucket(bucket, params)`. Returns dictionary with `success` boolean flag

@@ -185,6 +187,25 @@ Flags:
   -v, --version   version for registry-exporter
 ```

+## Import pregen into registry db
+
+You can import pregenerated json files into registry bolt db. Use `frostfs-xk6-registry import`. Usage examples are in help:
+
+```shell
+$ ./bin/frostfs-xk6-registry import -h
+Import objects into registry from pregenerated files
+
+Usage:
+  xk6-registry import [flags]
+
+Examples:
+  xk6-registry import registry.bolt preset.json
+  xk6-registry import registry.bolt preset.json another_preset.json
+
+Flags:
+  -h, --help   help for import
+```
+
 # License

 - [GNU General Public License v3.0](LICENSE)
55 cmd/xk6-registry/importer/import.go Normal file

@@ -0,0 +1,55 @@
+package importer
+
+import (
+    "encoding/json"
+    "os"
+
+    "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
+)
+
+type PreGenObj struct {
+    Bucket    string `json:"bucket"`
+    Object    string `json:"object"`
+    Container string `json:"container"`
+}
+
+type PreGenerateInfo struct {
+    Buckets    []string    `json:"buckets"`
+    Containers []string    `json:"containers"`
+    Objects    []PreGenObj `json:"objects"`
+    ObjSize    string      `json:"obj_size"`
+}
+
+// ImportJSONPreGen writes objects from pregenerated JSON file
+// to the registry.
+// Note that ImportJSONPreGen does not check if object already
+// exists in the registry so in case of re-entry the registry
+// will have two entities representing the same object.
+func ImportJSONPreGen(o *registry.ObjRegistry, filename string) error {
+    f, err := os.ReadFile(filename)
+    if err != nil {
+        return err
+    }
+
+    var pregenInfo PreGenerateInfo
+    err = json.Unmarshal(f, &pregenInfo)
+    if err != nil {
+        return err
+    }
+
+    // AddObject uses DB.Batch to combine concurrent Batch calls
+    // into a single Bolt transaction. DB.Batch is limited by
+    // DB.MaxBatchDelay which may affect perfomance.
+    for _, obj := range pregenInfo.Objects {
+        if obj.Bucket != "" {
+            err = o.AddObject("", "", obj.Bucket, obj.Object, "")
+        } else {
+            err = o.AddObject(obj.Container, obj.Object, "", "", "")
+        }
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
27 cmd/xk6-registry/importer/root.go Normal file

@@ -0,0 +1,27 @@
+package importer
+
+import (
+    "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
+    "github.com/spf13/cobra"
+)
+
+// Cmd represents the import command.
+var Cmd = &cobra.Command{
+    Use:   "import",
+    Short: "Import objects into registry",
+    Long:  "Import objects into registry from pregenerated files",
+    Example: `xk6-registry import registry.bolt preset.json
+xk6-registry import registry.bolt preset.json another_preset.json`,
+    RunE: runCmd,
+    Args: cobra.MinimumNArgs(2),
+}
+
+func runCmd(cmd *cobra.Command, args []string) error {
+    objRegistry := registry.NewObjRegistry(cmd.Context(), args[0])
+    for i := 1; i < len(args); i++ {
+        if err := ImportJSONPreGen(objRegistry, args[i]); err != nil {
+            return err
+        }
+    }
+    return nil
+}
18 cmd/xk6-registry/main.go Normal file

@@ -0,0 +1,18 @@
+package main
+
+import (
+    "context"
+    "os"
+    "os/signal"
+    "syscall"
+)
+
+func main() {
+    ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
+
+    if cmd, err := rootCmd.ExecuteContextC(ctx); err != nil {
+        cmd.PrintErrln("Error:", err.Error())
+        cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
+        os.Exit(1)
+    }
+}
33 cmd/xk6-registry/root.go Normal file

@@ -0,0 +1,33 @@
+package main
+
+import (
+    "runtime"
+
+    "git.frostfs.info/TrueCloudLab/xk6-frostfs/cmd/xk6-registry/importer"
+    "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/version"
+    "github.com/spf13/cobra"
+)
+
+var rootCmd = &cobra.Command{
+    Use:     "xk6-registry",
+    Version: version.Version,
+    Short:   "Command Line Tool to work with Registry",
+    Long: `Registry provides tools to work with object registry for xk6.
+It contains command for importing objects in registry from preset`,
+    SilenceErrors: true,
+    SilenceUsage:  true,
+    Run:           rootCmdRun,
+}
+
+func init() {
+    cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
+    rootCmd.SetVersionTemplate(`FrostFS xk6-registry
+{{printf "Version: %s" .Version }}
+GoVersion: {{ runtimeVersion }}
+`)
+    rootCmd.AddCommand(importer.Cmd)
+}
+
+func rootCmdRun(cmd *cobra.Command, _ []string) {
+    _ = cmd.Usage()
+}
@@ -1,7 +1,8 @@
 import local from 'k6/x/frostfs/local';
+import datagen from 'k6/x/frostfs/datagen';
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

-const payload = open('../go.sum', 'b');
+const generator = datagen.generator(1024, "random", false);
 const local_cli = local.connect("/path/to/config.yaml", "/path/to/config/dir", "", false)

 export const options = {

@@ -15,6 +16,7 @@ export default function () {
     'unique_header': uuidv4()
   }
   const container_id = '6BVPPXQewRJ6J5EYmAPLczXxNocS7ikyF7amS2esWQnb';
+  const payload = generator.genPayload()
   let resp = local_cli.put(container_id, headers, payload)
   if (resp.success) {
     local_cli.get(container_id, resp.object_id)
@@ -1,9 +1,10 @@
 import native from 'k6/x/frostfs/native';
+import datagen from 'k6/x/frostfs/datagen';
 import { fail } from "k6";
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

-const payload = open('../go.sum', 'b');
-const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false)
+const generator = datagen.generator(1024, "random", false);
+const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false, 0)

 export const options = {
   stages: [

@@ -13,7 +14,6 @@ export const options = {

 export function setup() {
   const params = {
-    acl: 'public-read-write',
     placement_policy: 'REP 3',
     name: 'container-name',
     name_global_scope: 'false'

@@ -28,6 +28,7 @@ export function setup() {
 }

 export default function (data) {
+  const payload = generator.genPayload()
   let headers = {
     'unique_header': uuidv4()
   }
@@ -1,9 +1,11 @@
 import native from 'k6/x/frostfs/native';
+import datagen from 'k6/x/frostfs/datagen';
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

-const payload = open('../go.sum', 'b');
+const generator = datagen.generator(1024, "random", false);
+const payload = generator.genPayload()
 const container = "AjSxSNNXbJUDPqqKYm1VbFVDGCakbpUNH8aGjPmGAH3B"
-const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false)
+const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
 const frostfs_obj = frostfs_cli.onsite(container, payload)

 export const options = {
@@ -1,8 +1,9 @@
 import s3 from 'k6/x/frostfs/s3';
+import datagen from 'k6/x/frostfs/datagen';
 import { fail } from 'k6'
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

-const payload = open('../go.sum', 'b');
+const generator = datagen.generator(1024, "random", false);
 const bucket = "cats"
 const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true'})

@@ -16,7 +17,6 @@ export function setup() {
   const params = {
-    acl: 'private',
     lock_enabled: 'true',
     location_constraint: 'ru'
   }

   const res = s3_cli.createBucket(bucket, params)

@@ -27,6 +27,7 @@ export function setup() {

 export default function () {
   const key = uuidv4();
+  const payload = generator.genPayload()
   if (s3_cli.put(bucket, key, payload).success) {
     s3_cli.get(bucket, key)
   }
@@ -1,14 +1,16 @@
 import s3local from 'k6/x/frostfs/s3local';
+import datagen from 'k6/x/frostfs/datagen';
 import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

 const bucket = "testbucket"
-const payload = open('../go.sum', 'b');
+const generator = datagen.generator(1024, "random", false);
 const s3local_cli = s3local.connect("path/to/storage/config.yml", "path/to/storage/config/dir", {}, {
   'testbucket': 'GBQDDUM1hdodXmiRHV57EUkFWJzuntsG8BG15wFSwam6',
 });

 export default function () {
   const key = uuidv4();
+  const payload = generator.genPayload()
   if (s3local_cli.put(bucket, key, payload).success) {
     s3local_cli.get(bucket, key)
   }
148 go.mod

@@ -1,133 +1,141 @@
 module git.frostfs.info/TrueCloudLab/xk6-frostfs

-go 1.21
+go 1.22

 require (
-	git.frostfs.info/TrueCloudLab/frostfs-node v0.38.3-0.20240502170333-ec2873caa7c6
-	git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.29.0-rc.1.0.20240422122918-034396d554ec
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240502080121-12ddefe07877
+	git.frostfs.info/TrueCloudLab/frostfs-node v0.44.1-0.20250113100501-6c51f48aab69
+	git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.32.0
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76
 	git.frostfs.info/TrueCloudLab/tzhash v1.8.0
-	github.com/aws/aws-sdk-go-v2 v1.19.0
-	github.com/aws/aws-sdk-go-v2/config v1.18.28
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.72
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.37.0
+	github.com/aws/aws-sdk-go-v2 v1.32.8
+	github.com/aws/aws-sdk-go-v2/config v1.28.9
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.47
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2
 	github.com/dop251/goja v0.0.0-20230626124041-ba8a63e79201
 	github.com/go-loremipsum/loremipsum v1.1.3
 	github.com/google/uuid v1.6.0
 	github.com/joho/godotenv v1.5.1
-	github.com/nspcc-dev/neo-go v0.105.1
+	github.com/nspcc-dev/neo-go v0.106.3
 	github.com/panjf2000/ants/v2 v2.9.0
 	github.com/sirupsen/logrus v1.9.3
-	github.com/spf13/cobra v1.8.0
-	github.com/stretchr/testify v1.8.4
-	go.etcd.io/bbolt v1.3.8
+	github.com/spf13/cobra v1.8.1
+	github.com/stretchr/testify v1.9.0
+	go.etcd.io/bbolt v1.3.10
 	go.k6.io/k6 v0.45.1
-	go.uber.org/zap v1.26.0
-	golang.org/x/sys v0.18.0
+	go.uber.org/zap v1.27.0
+	golang.org/x/sys v0.28.0
 )

 require (
 	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240427200446-67c6f305b21f // indirect
-	git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240409115729-6eb492025bdd // indirect
+	git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 // indirect
 	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
-	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 // indirect
+	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 // indirect
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
-	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240416071728-04a79f57ef1f // indirect
+	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b // indirect
 	git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
 	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 // indirect
-	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
-	github.com/aws/aws-sdk-go v1.44.296 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.13.27 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.36 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.27 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.30 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.29 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.12.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.19.3 // indirect
-	github.com/aws/smithy-go v1.13.5 // indirect
+	github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
+	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.50 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.33.5 // indirect
+	github.com/aws/smithy-go v1.22.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bluele/gcache v0.0.2 // indirect
-	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
 	github.com/dlclark/regexp2 v1.10.0 // indirect
 	github.com/fatih/color v1.15.0 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/go-chi/chi/v5 v5.0.8 // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-pkgz/expirable-cache/v3 v3.0.0 // indirect
 	github.com/go-sourcemap/sourcemap v2.1.4-0.20211119122758-180fcef48034+incompatible // indirect
 	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
-	github.com/hashicorp/golang-lru v1.0.2 // indirect
+	github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/holiman/uint256 v1.2.4 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/ipfs/go-cid v0.4.1 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/klauspost/compress v1.17.4 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
-	github.com/minio/sio v0.3.1 // indirect
+	github.com/minio/sha256-simd v1.0.1 // indirect
+	github.com/minio/sio v0.3.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
 	github.com/mstoykov/atlas v0.0.0-20220811071828-388f114305dd // indirect
-	github.com/nats-io/nats.go v1.32.0 // indirect
-	github.com/nats-io/nkeys v0.4.7 // indirect
-	github.com/nats-io/nuid v1.0.1 // indirect
-	github.com/nspcc-dev/go-ordered-json v0.0.0-20240112074137-296698a162ae // indirect
-	github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
+	github.com/multiformats/go-base32 v0.1.0 // indirect
+	github.com/multiformats/go-base36 v0.2.0 // indirect
+	github.com/multiformats/go-multiaddr v0.14.0 // indirect
+	github.com/multiformats/go-multibase v0.2.0 // indirect
+	github.com/multiformats/go-multihash v0.2.3 // indirect
+	github.com/multiformats/go-varint v0.0.7 // indirect
+	github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
+	github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
 	github.com/onsi/gomega v1.20.2 // indirect
-	github.com/pelletier/go-toml/v2 v2.1.1 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
 	github.com/pierrec/lz4 v2.6.1+incompatible // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_golang v1.18.0 // indirect
+	github.com/prometheus/client_golang v1.19.0 // indirect
 	github.com/prometheus/client_model v0.5.0 // indirect
-	github.com/prometheus/common v0.46.0 // indirect
+	github.com/prometheus/common v0.48.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
-	github.com/sagikazarmark/locafero v0.4.0 // indirect
+	github.com/sagikazarmark/locafero v0.6.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/spf13/afero v1.11.0 // indirect
 	github.com/spf13/cast v1.6.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.18.2 // indirect
+	github.com/spf13/viper v1.19.0 // indirect
 	github.com/ssgreg/journald v1.0.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
 	github.com/twmb/murmur3 v1.1.8 // indirect
-	go.opentelemetry.io/otel v1.22.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 // indirect
-	go.opentelemetry.io/otel/metric v1.22.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.22.0 // indirect
-	go.opentelemetry.io/otel/trace v1.22.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
+	go.opentelemetry.io/otel v1.31.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
+	go.opentelemetry.io/otel/metric v1.31.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.31.0 // indirect
+	go.opentelemetry.io/otel/trace v1.31.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.21.0 // indirect
-	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-	golang.org/x/net v0.23.0 // indirect
-	golang.org/x/sync v0.6.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/crypto v0.31.0 // indirect
+	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
+	golang.org/x/net v0.30.0 // indirect
+	golang.org/x/sync v0.10.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
-	google.golang.org/grpc v1.63.2 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
+	google.golang.org/grpc v1.69.2 // indirect
+	google.golang.org/protobuf v1.36.1 // indirect
 	gopkg.in/guregu/null.v3 v3.5.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	lukechampine.com/blake3 v1.2.1 // indirect
 )
BIN go.sum
Binary file not shown.
@@ -6,7 +6,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
 )

 var (

@@ -37,16 +36,17 @@ type sizeLimiter struct {
 	currentSize *atomic.Int64
 }

+func (*sizeLimiter) SetEvacuationInProgress(shardID string, value bool) {}
 func (*sizeLimiter) AddMethodDuration(method string, d time.Duration) {}
 func (*sizeLimiter) AddToContainerSize(cnrID string, size int64) {}
 func (*sizeLimiter) AddToObjectCounter(shardID string, objectType string, delta int) {}
 func (*sizeLimiter) ClearErrorCounter(shardID string) {}
 func (*sizeLimiter) DeleteShardMetrics(shardID string) {}
-func (*sizeLimiter) GC() metrics.GCMetrics { return &noopGCMetrics{} }
+func (*sizeLimiter) GC() engine.GCMetrics { return &noopGCMetrics{} }
 func (*sizeLimiter) IncErrorCounter(shardID string) {}
 func (*sizeLimiter) SetMode(shardID string, mode mode.Mode) {}
 func (*sizeLimiter) SetObjectCounter(shardID string, objectType string, v uint64) {}
-func (*sizeLimiter) WriteCache() metrics.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
+func (*sizeLimiter) WriteCache() engine.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
 func (*sizeLimiter) DeleteContainerSize(cnrID string) {}
 func (*sizeLimiter) DeleteContainerCount(cnrID string) {}
 func (*sizeLimiter) SetContainerObjectCounter(_, _, _ string, _ uint64) {}

@@ -67,17 +67,18 @@ func (sl *sizeLimiter) IsFull() bool {

 type noopLimiter struct{}

+func (*noopLimiter) SetEvacuationInProgress(shardID string, value bool) {}
 func (*noopLimiter) AddMethodDuration(method string, d time.Duration) {}
 func (*noopLimiter) AddToContainerSize(cnrID string, size int64) {}
 func (*noopLimiter) AddToObjectCounter(shardID string, objectType string, delta int) {}
 func (*noopLimiter) AddToPayloadCounter(shardID string, size int64) {}
 func (*noopLimiter) ClearErrorCounter(shardID string) {}
 func (*noopLimiter) DeleteShardMetrics(shardID string) {}
-func (*noopLimiter) GC() metrics.GCMetrics { return &noopGCMetrics{} }
+func (*noopLimiter) GC() engine.GCMetrics { return &noopGCMetrics{} }
 func (*noopLimiter) IncErrorCounter(shardID string) {}
 func (*noopLimiter) SetMode(shardID string, mode mode.Mode) {}
 func (*noopLimiter) SetObjectCounter(shardID string, objectType string, v uint64) {}
-func (*noopLimiter) WriteCache() metrics.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
+func (*noopLimiter) WriteCache() engine.WriteCacheMetrics { return &noopWriteCacheMetrics{} }
 func (*noopLimiter) IsFull() bool { return false }
 func (*noopLimiter) DeleteContainerSize(cnrID string) {}
 func (*noopLimiter) DeleteContainerCount(cnrID string) {}

@@ -99,7 +100,7 @@ type noopWriteCacheMetrics struct{}

 func (*noopWriteCacheMetrics) AddMethodDuration(_, _, _, _ string, _ bool, _ time.Duration) {}
 func (*noopWriteCacheMetrics) Close(_, _ string) {}
-func (*noopWriteCacheMetrics) IncOperationCounter(_, _, _, _ string, _ metrics.NullBool) {}
+func (*noopWriteCacheMetrics) IncOperationCounter(_, _, _, _ string, _ engine.NullBool) {}
 func (*noopWriteCacheMetrics) SetActualCount(_, _, _ string, count uint64) {}
 func (*noopWriteCacheMetrics) SetEstimateSize(_, _, _ string, _ uint64) {}
 func (*noopWriteCacheMetrics) SetMode(shardID string, mode string) {}
@@ -30,7 +30,6 @@ import (
 	"go.etcd.io/bbolt"
 	"go.k6.io/k6/js/modules"
 	"go.k6.io/k6/metrics"
-	"go.uber.org/zap"
 	"golang.org/x/sys/unix"
 )

@@ -232,29 +231,30 @@ func (epochState) CurrentEpoch() uint64 { return 0 }
 //
 // Note that the configuration file only needs to contain the storage-specific sections.
 func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug bool, l Limiter) ([]engine.Option, [][]shard.Option, error) {
-	log := zap.L()
+	prm := logger.Prm{}
+	_ = prm.SetDestination("stdout")
 	if debug {
-		var err error
-		log, err = zap.NewDevelopment()
-		if err != nil {
-			return nil, nil, fmt.Errorf("creating development logger: %v", err)
-		}
+		_ = prm.SetLevelString("debug")
 	}
+	lg, err := logger.NewLogger(&prm)
+	if err != nil {
+		return nil, nil, err
+	}

 	ngOpts := []engine.Option{
 		engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
 		engine.WithShardPoolSize(engineconfig.ShardPoolSize(c)),
-		engine.WithLogger(&logger.Logger{Logger: log}),
+		engine.WithLogger(lg),
 		engine.WithMetrics(l),
 	}

 	var shOpts [][]shard.Option

-	err := engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
+	err = engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
 		opts := []shard.Option{
 			shard.WithRefillMetabase(sc.RefillMetabase()),
 			shard.WithMode(sc.Mode()),
-			shard.WithLogger(&logger.Logger{Logger: log}),
+			shard.WithLogger(lg),
 		}

 		// substorages

@@ -273,7 +273,7 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
 					blobovniczatree.WithBlobovniczaShallowDepth(cfg.ShallowDepth()),
 					blobovniczatree.WithBlobovniczaShallowWidth(cfg.ShallowWidth()),
 					blobovniczatree.WithOpenedCacheSize(cfg.OpenedCacheSize()),
-					blobovniczatree.WithLogger(&logger.Logger{Logger: log}),
+					blobovniczatree.WithLogger(lg),
 				),
 				Policy: func(_ *objectSDK.Object, data []byte) bool {
 					return uint64(len(data)) < sc.SmallSizeLimit()

@@ -302,7 +302,7 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
 			blobstor.WithCompressObjects(sc.Compress()),
 			blobstor.WithUncompressableContentTypes(sc.UncompressableContentTypes()),
 			blobstor.WithStorages(substorages),
-			blobstor.WithLogger(&logger.Logger{Logger: log}),
+			blobstor.WithLogger(lg),
 		))
 	}

@@ -313,14 +313,11 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
 			shard.WithWriteCacheOptions(
 				[]writecache.Option{
 					writecache.WithPath(wc.Path()),
-					writecache.WithMaxBatchSize(wc.BoltDB().MaxBatchSize()),
-					writecache.WithMaxBatchDelay(wc.BoltDB().MaxBatchDelay()),
 					writecache.WithMaxObjectSize(wc.MaxObjectSize()),
-					writecache.WithSmallObjectSize(wc.SmallObjectSize()),
 					writecache.WithFlushWorkersCount(wc.WorkerCount()),
 					writecache.WithMaxCacheSize(wc.SizeLimit()),
 					writecache.WithNoSync(wc.NoSync()),
-					writecache.WithLogger(&logger.Logger{Logger: log}),
+					writecache.WithLogger(lg),
 				},
 			),
 		)

@@ -350,7 +347,7 @@ func storageEngineOptionsFromConfig(ctx context.Context, c *config.Config, debug
 				Timeout: 1 * time.Second,
 			}),
 			metabase.WithEpochState(epochState{}),
-			metabase.WithLogger(&logger.Logger{Logger: log}),
+			metabase.WithLogger(lg),
 		))
 	}
@@ -69,7 +69,7 @@ func (c *RawClient) Put(ctx context.Context, containerID cid.ID, ownerID *user.I
 	}

 	var req engine.PutPrm
-	req.WithObject(obj)
+	req.Object = obj

 	start := time.Now()
 	err = c.ng.Put(ctx, req)
@@ -1,6 +1,10 @@
 package logging

 import (
+	"fmt"
+	"strings"
+	"time"
+
 	"github.com/dop251/goja"
 	"github.com/sirupsen/logrus"
 	"go.k6.io/k6/js/modules"

@@ -55,14 +59,29 @@ func (r *RootModule) NewModuleInstance(vu modules.VU) modules.Instance {
 		return &Logging{vu: vu}
 	}

+	tsFormat, disableTs := time.TimeOnly, false
+	if val, ok := vu.InitEnv().LookupEnv("DATE_FORMAT"); ok {
+		switch strings.ToLower(val) {
+		case "timeonly":
+		case "datetime":
+			tsFormat = time.DateTime
+		case "none":
+			disableTs = true
+		default:
+			panic(fmt.Sprintf("invalid value for DATE_FORMAT: %s (should be `timeonly`, `datetime` or `none`)", val))
+		}
+	}
+
 	format := lg.Formatter
 	switch f := format.(type) {
 	case *logrus.TextFormatter:
 		f.ForceColors = true
 		f.FullTimestamp = true
-		f.TimestampFormat = "15:04:05"
+		f.TimestampFormat = tsFormat
+		f.DisableTimestamp = disableTs
 	case *logrus.JSONFormatter:
-		f.TimestampFormat = "15:04:05"
+		f.TimestampFormat = tsFormat
+		f.DisableTimestamp = disableTs
 	}

 	return &Logging{vu: vu}
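Assuming `DATE_FORMAT` is visible in the k6 process environment (the module reads it with `vu.InitEnv().LookupEnv`), usage would look roughly like this; the scenario path is illustrative:

```sh
$ DATE_FORMAT=datetime ./k6 run scenarios/grpc.js   # full date + time in log timestamps
$ DATE_FORMAT=none ./k6 run scenarios/grpc.js       # suppress log timestamps entirely
```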
58 internal/native/cache.go Normal file

@@ -0,0 +1,58 @@
+package native
+
+import (
+    "context"
+    "sync"
+    "time"
+
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+)
+
+const networkCacheTTL = time.Minute
+
+var networkInfoCache = &networkInfoCacheT{}
+
+type networkInfoCacheT struct {
+    guard   sync.RWMutex
+    current *netmap.NetworkInfo
+    fetchTS time.Time
+}
+
+func (c *networkInfoCacheT) getOrFetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
+    if v := c.get(); v != nil {
+        return v, nil
+    }
+    return c.fetch(ctx, cli)
+}
+
+func (c *networkInfoCacheT) get() *netmap.NetworkInfo {
+    c.guard.RLock()
+    defer c.guard.RUnlock()
+
+    if c.current == nil || time.Since(c.fetchTS) > networkCacheTTL {
+        return nil
+    }
+
+    return c.current
+}
+
+func (c *networkInfoCacheT) fetch(ctx context.Context, cli *client.Client) (*netmap.NetworkInfo, error) {
+    c.guard.Lock()
+    defer c.guard.Unlock()
+
+    if time.Since(c.fetchTS) <= networkCacheTTL {
+        return c.current, nil
+    }
+
+    res, err := cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
+    if err != nil {
+        return nil, err
+    }
+
+    v := res.Info()
+    c.current = &v
+    c.fetchTS = time.Now()
+
+    return c.current, nil
+}
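A note on the locking in this cache: `get` takes only the read lock and treats a stale entry as a miss, while `fetch` re-checks the TTL after acquiring the write lock, so VUs that race on a miss collapse into a single `NetworkInfo` call per TTL window.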
@@ -13,7 +13,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"

@@ -35,6 +34,7 @@ type (
 		tok            session.Object
 		cli            *client.Client
 		prepareLocally bool
+		maxObjSize     uint64
 	}

 	PutResponse struct {

@@ -71,6 +71,7 @@ type (
 		hdr            object.Object
 		payload        []byte
 		prepareLocally bool
+		maxObjSize     uint64
 	}
 )

@@ -103,7 +104,7 @@ func (c *Client) Put(containerID string, headers map[string]string, payload data
 	o.SetOwnerID(owner)
 	o.SetAttributes(attrs...)

-	resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize)
+	resp, err := put(c.vu, c.cli, c.prepareLocally, &tok, &o, payload, chunkSize, c.maxObjSize)
 	if err != nil {
 		return PutResponse{Success: false, Error: err.Error()}
 	}

@@ -263,16 +264,6 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
 	container.SetCreationTime(&cnr, time.Now())
 	cnr.SetOwner(usr)

-	if basicACLStr, ok := params["acl"]; ok {
-		var basicACL acl.Basic
-		err := basicACL.DecodeString(basicACLStr)
-		if err != nil {
-			return c.putCnrErrorResponse(err)
-		}
-
-		cnr.SetBasicACL(basicACL)
-	}
-
 	placementPolicyStr, ok := params["placement_policy"]
 	if ok {
 		var placementPolicy netmap.PlacementPolicy

@@ -373,6 +364,7 @@ func (c *Client) Onsite(containerID string, payload datagen.Payload) PreparedObj
 		hdr:            *obj,
 		payload:        data,
 		prepareLocally: c.prepareLocally,
+		maxObjSize:     c.maxObjSize,
 	}
 }

@@ -398,7 +390,7 @@ func (p PreparedObject) Put(headers map[string]string) PutResponse {
 		return PutResponse{Success: false, Error: err.Error()}
 	}

-	_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0)
+	_, err = put(p.vu, p.cli, p.prepareLocally, nil, &obj, datagen.NewFixedPayload(p.payload), 0, p.maxObjSize)
 	if err != nil {
 		return PutResponse{Success: false, Error: err.Error()}
 	}

@@ -413,7 +405,7 @@ func (s epochSource) CurrentEpoch() uint64 {
 }

 func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Object,
-	hdr *object.Object, payload datagen.Payload, chunkSize int,
+	hdr *object.Object, payload datagen.Payload, chunkSize int, maxObjSize uint64,
 ) (*client.ResObjectPut, error) {
 	bufSize := defaultBufferSize
 	if chunkSize > 0 {

@@ -434,13 +426,16 @@ func put(vu modules.VU, cli *client.Client, prepareLocally bool, tok *session.Ob
 		prm.MaxChunkLength = chunkSize
 	}
 	if prepareLocally {
-		res, err := cli.NetworkInfo(vu.Context(), client.PrmNetworkInfo{})
+		ni, err := networkInfoCache.getOrFetch(vu.Context(), cli)
 		if err != nil {
 			return nil, err
 		}
-		prm.MaxSize = res.Info().MaxObjectSize()
-		prm.EpochSource = epochSource(res.Info().CurrentEpoch())
+		prm.MaxSize = ni.MaxObjectSize()
+		prm.EpochSource = epochSource(ni.CurrentEpoch())
 		prm.WithoutHomomorphHash = true
+		if maxObjSize > 0 {
+			prm.MaxSize = maxObjSize
+		}
 	}

 	objectWriter, err := cli.ObjectPutInit(vu.Context(), prm)
@@ -52,13 +52,17 @@ func (n *Native) Exports() modules.Exports {
 	return modules.Exports{Default: n}
 }

-func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool) (*Client, error) {
+func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTimeout int, prepareLocally bool, maxObjSize int) (*Client, error) {
 	var (
 		cli client.Client
 		pk  *keys.PrivateKey
 		err error
 	)

+	if maxObjSize < 0 {
+		return nil, fmt.Errorf("max object size value must be positive")
+	}
+
 	pk, err = keys.NewPrivateKey()
 	if len(hexPrivateKey) != 0 {
 		pk, err = keys.NewPrivateKeyFromHex(hexPrivateKey)

@@ -114,6 +118,21 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
 	tok.SetAuthKey(&key)
 	tok.SetExp(exp)

+	res, err := cli.NetworkInfo(n.vu.Context(), client.PrmNetworkInfo{})
+	if err != nil {
+		return nil, err
+	}
+
+	prevEpoch := res.Info().CurrentEpoch() - 1
+	tok.SetNbf(prevEpoch)
+	tok.SetIat(prevEpoch)
+
+	if prepareLocally && maxObjSize > 0 {
+		if uint64(maxObjSize) > res.Info().MaxObjectSize() {
+			return nil, fmt.Errorf("max object size must be not greater than %d bytes", res.Info().MaxObjectSize())
+		}
+	}
+
 	// register metrics

 	objPutSuccess, _ = stats.Registry.NewMetric("frostfs_obj_put_success", metrics.Counter)

@@ -140,5 +159,6 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
 		tok:            tok,
 		cli:            &cli,
 		prepareLocally: prepareLocally,
+		maxObjSize:     uint64(maxObjSize),
 	}, nil
 }
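A hedged sketch of the extended `connect` signature above: the sixth argument caps the locally generated object size when `prepareLocally` is on, and `Connect` rejects values above the network's `MaxObjectSize` (the 4 MiB figure is illustrative):

```js
import native from 'k6/x/frostfs/native';

// split objects locally, capping generated parts at 4 MiB
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, true, 4194304)
```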
@@ -11,6 +11,7 @@ type ObjExporter struct {

 type PreGenerateInfo struct {
 	Buckets    []string  `json:"buckets"`
+	Containers []string  `json:"containers"`
 	Objects    []ObjInfo `json:"objects"`
 	ObjSize    string    `json:"obj_size"`
 }

@@ -18,6 +19,8 @@ type PreGenerateInfo struct {
 type ObjInfo struct {
 	Bucket string `json:"bucket"`
 	Object string `json:"object"`
+	CID    string `json:"cid"`
+	OID    string `json:"oid"`
 }

 func NewObjExporter(selector *ObjSelector) *ObjExporter {
156 internal/registry/obj_exporter_test.go Normal file

@@ -0,0 +1,156 @@
+package registry
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "math/rand"
+    "os"
+    "path/filepath"
+    "slices"
+    "strings"
+    "testing"
+
+    "github.com/stretchr/testify/require"
+)
+
+type expectedResult struct {
+    mode     string
+    objects  []ObjectInfo
+    dir      string
+    dbName   string
+    jsonName string
+}
+
+func TestObjectExporter(t *testing.T) {
+    names := []string{"s3", "grpc"}
+    for _, name := range names {
+        t.Run(name, runExportTest)
+        t.Run(name+"-changed", runExportChangedTest)
+        t.Run(name+"-empty", runExportEmptyTest)
+    }
+}
+
+func runExportTest(t *testing.T) {
+    expected := getExpectedResult(t)
+    objReg := getFilledRegistry(t, expected)
+    objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
+
+    require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
+    require.NoError(t, checkExported(expected.objects, expected.jsonName))
+}
+
+func runExportChangedTest(t *testing.T) {
+    expected := getExpectedResult(t)
+    objReg := getFilledRegistry(t, expected)
+
+    newStatus := randString(10)
+    num := randPositiveInt(1, len(expected.objects))
+    changedObjects := make([]ObjectInfo, num)
+    require.Equal(t, num, copy(changedObjects[:], expected.objects[:]))
+
+    sel := NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated})
+    for i := range changedObjects {
+        changedObjects[i].Status = newStatus
+        require.NoError(t, objReg.SetObjectStatus(sel.NextObject().Id, statusCreated, newStatus))
+    }
+
+    objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: newStatus}))
+    require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
+    require.NoError(t, checkExported(changedObjects, expected.jsonName))
+}
+
+func runExportEmptyTest(t *testing.T) {
+    expected := getExpectedResult(t)
+    expected.objects = make([]ObjectInfo, 0)
+    objReg := getFilledRegistry(t, expected)
+    objExp := NewObjExporter(NewObjSelector(objReg, 0, SelectorOneshot, &ObjFilter{Status: statusCreated}))
+
+    require.NoError(t, objExp.ExportJSONPreGen(expected.jsonName))
+    require.NoError(t, checkExported(expected.objects, expected.jsonName))
+}
+
+func getExpectedResult(t *testing.T) expectedResult {
+    num := randPositiveInt(2, 100)
+    mode := getMode(t.Name())
+    require.NotEqual(t, "", mode, "test mode should contain either \"s3\" or\"grpc\"")
+    dir := t.TempDir()
+    res := expectedResult{
+        mode:     mode,
+        objects:  generateObjectInfo(num, t.Name()),
+        dir:      dir,
+        dbName:   filepath.Join(dir, "registry-"+mode+".db"),
+        jsonName: filepath.Join(dir, "registry-"+mode+".json"),
+    }
+    return res
+}
+
+func randPositiveInt(min, max int) int {
+    return rand.Intn(max-min) + min
+}
+
+func getMode(name string) (res string) {
+    if strings.Contains(name, "s3") {
+        res = filepath.Base(name)
+    }
+    if strings.Contains(name, "grpc") {
+        res = filepath.Base(name)
+    }
+    return res
+}
+
+func generateObjectInfo(num int, mode string) []ObjectInfo {
+    res := make([]ObjectInfo, num)
+    for i := range res {
+        res[i] = randomObjectInfo()
+        if !strings.Contains(mode, "s3") {
+            res[i].S3Bucket = ""
+            res[i].S3Key = ""
+        }
+        if !strings.Contains(mode, "grpc") {
+            res[i].CID = ""
+            res[i].OID = ""
+        }
+    }
+    return res
+}
+
+func getFilledRegistry(t *testing.T, expected expectedResult) *ObjRegistry {
+    objReg := NewObjRegistry(context.Background(), expected.dbName)
+    for i := range expected.objects {
+        require.NoError(t, objReg.AddObject(expected.objects[i].CID, expected.objects[i].OID, expected.objects[i].S3Bucket, expected.objects[i].S3Key, expected.objects[i].PayloadHash))
+    }
+    return objReg
+}
+
+func checkExported(expected []ObjectInfo, fileName string) error {
+    file, err := os.ReadFile(fileName)
+    if err != nil {
+        return err
+    }
+
+    if !json.Valid(file) {
+        return fmt.Errorf("exported json file %s is invalid", fileName)
+    }
+
+    var actual PreGenerateInfo
+    if json.Unmarshal(file, &actual) != nil {
+        return err
+    }
+
+    if len(expected) != len(actual.Objects) {
+        return fmt.Errorf("expected len(): %v, got len(): %v", len(expected), len(actual.Objects))
+    }
+
+    for i := range expected {
+        if !slices.ContainsFunc(actual.Objects, func(oi ObjInfo) bool {
+            compareS3 := oi.Bucket == expected[i].S3Bucket && oi.Object == expected[i].S3Key
+            comparegRPC := oi.CID == expected[i].CID && oi.OID == expected[i].OID
+            return compareS3 && comparegRPC
+        }) {
+            return fmt.Errorf("object %v not found in exported json file %s", expected[i], fileName)
+        }
+    }
+
+    return nil
+}
@@ -101,7 +101,7 @@ func randomObjectInfo() ObjectInfo {
 func randString(n int) string {
 	var sb strings.Builder
 	for i := 0; i < n; i++ {
-		sb.WriteRune('a' + rune(rand.Int())%('z'-'a'+1))
+		sb.WriteRune('a' + rune(rand.Int31())%('z'-'a'+1))
 	}
 	return sb.String()
 }
@@ -3,12 +3,15 @@ package registry
 import (
 	"context"
 	"fmt"
 	"sync"
+	"time"

 	"github.com/nspcc-dev/neo-go/pkg/io"
 	"go.etcd.io/bbolt"
 )

+const nextObjectTimeout = 10 * time.Second
+
 type ObjFilter struct {
 	Status string
 	Age    int

@@ -21,6 +24,8 @@ type ObjSelector struct {
 	filter    *ObjFilter
 	cacheSize int
 	kind      SelectorKind
+	// Sync synchronizes VU used for deletion.
+	Sync sync.WaitGroup
 }

 // objectSelectCache is the default maximum size of a batch to select from DB.

@@ -32,7 +37,6 @@ func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind,
 	if selectionSize <= 0 {
 		selectionSize = objectSelectCache
 	}
-	println("selectionSize", selectionSize)
 	if filter == nil || filter.Status == "" {
 		panic("filtering without status is not supported")
 	}

@@ -58,9 +62,18 @@ func NewObjSelector(registry *ObjRegistry, selectionSize int, kind SelectorKind,
 // - underlying registry context is done, nil objects will be returned on the
 // currently blocked and every further NextObject calls.
 func (o *ObjSelector) NextObject() *ObjectInfo {
-	return <-o.objChan
+	if o.kind == SelectorOneshot {
+		return <-o.objChan
+	}
+
+	select {
+	case <-time.After(nextObjectTimeout):
+		return nil
+	case obj := <-o.objChan:
+		return obj
+	}
 }

 // Count returns total number of objects that match filter of the selector.
 func (o *ObjSelector) Count() (int, error) {
 	count := 0
@@ -142,6 +142,70 @@ func (c *Client) Get(bucket, key string) GetResponse {
	return GetResponse{Success: true}
}

// DeleteObjectVersion deletes object version with specified versionID.
// If version argument is empty, deletes all versions and delete-markers of specified object.
func (c *Client) DeleteObjectVersion(bucket, key, version string) DeleteResponse {
	var toDelete []types.ObjectIdentifier

	if version != "" {
		toDelete = append(toDelete, types.ObjectIdentifier{
			Key:       aws.String(key),
			VersionId: aws.String(version),
		})
	} else {
		versions, err := c.cli.ListObjectVersions(c.vu.Context(), &s3.ListObjectVersionsInput{
			Bucket: aws.String(bucket),
			Prefix: aws.String(key),
		})
		if err != nil {
			stats.Report(c.vu, objDeleteFails, 1)
			return DeleteResponse{Success: false, Error: err.Error()}
		}
		toDelete = filterObjectVersions(versions, key)
	}
	if len(toDelete) == 0 {
		return c.Delete(bucket, key)
	} else {
		_, err := c.cli.DeleteObjects(c.vu.Context(), &s3.DeleteObjectsInput{
			Bucket: aws.String(bucket),
			Delete: &types.Delete{
				Objects: toDelete,
				Quiet:   aws.Bool(true),
			},
		})
		if err != nil {
			stats.Report(c.vu, objDeleteFails, 1)
			return DeleteResponse{Success: false, Error: err.Error()}
		}
	}

	return DeleteResponse{Success: true}
}

func filterObjectVersions(versions *s3.ListObjectVersionsOutput, key string) []types.ObjectIdentifier {
	var result []types.ObjectIdentifier

	for _, v := range versions.Versions {
		if *v.Key == key {
			result = append(result, types.ObjectIdentifier{
				Key:       v.Key,
				VersionId: v.VersionId,
			})
		}
	}

	for _, marker := range versions.DeleteMarkers {
		if *marker.Key == key {
			result = append(result, types.ObjectIdentifier{
				Key:       marker.Key,
				VersionId: marker.VersionId,
			})
		}
	}

	return result
}

func get(
	c *s3.Client,
	bucket string,
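One subtlety worth noting: `ListObjectVersions` filters by *prefix*, so a listing for key `a` can also return `a/b`; `filterObjectVersions` keeps only exact-key matches. A hedged sketch (same package assumed, listing built by hand purely for illustration):

```go
package s3

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// exactKeyIdentifiers demonstrates the exact-match filtering performed by
// filterObjectVersions above, on a hand-built listing.
func exactKeyIdentifiers() []types.ObjectIdentifier {
	out := &s3.ListObjectVersionsOutput{
		Versions: []types.ObjectVersion{
			{Key: aws.String("a"), VersionId: aws.String("v1")},
			{Key: aws.String("a/b"), VersionId: aws.String("v1")}, // prefix match only, not our key
		},
		DeleteMarkers: []types.DeleteMarkerEntry{
			{Key: aws.String("a"), VersionId: aws.String("v2")},
		},
	}
	return filterObjectVersions(out, "a") // keeps v1 and marker v2 of "a", drops "a/b"
}
```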
@@ -208,13 +272,33 @@ func (c *Client) CreateBucket(bucket string, params map[string]string) CreateBuc
		Bucket:                     aws.String(bucket),
		ACL:                        types.BucketCannedACL(params["acl"]),
		CreateBucketConfiguration:  bucketConfiguration,
		ObjectLockEnabledForBucket: lockEnabled,
		ObjectLockEnabledForBucket: aws.Bool(lockEnabled),
	})
	if err != nil {
		stats.Report(c.vu, createBucketFails, 1)
		return CreateBucketResponse{Success: false, Error: err.Error()}
	}

	var versioning bool
	if strVersioned, ok := params["versioning"]; ok {
		if versioning, err = strconv.ParseBool(strVersioned); err != nil {
			stats.Report(c.vu, createBucketFails, 1)
			return CreateBucketResponse{Success: false, Error: err.Error()}
		}
	}
	if versioning {
		_, err = c.cli.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
			Bucket: aws.String(bucket),
			VersioningConfiguration: &types.VersioningConfiguration{
				Status: types.BucketVersioningStatusEnabled,
			},
		})
		if err != nil {
			stats.Report(c.vu, createBucketFails, 1)
			return CreateBucketResponse{Success: false, Error: err.Error()}
		}
	}

	stats.Report(c.vu, createBucketSuccess, 1)
	stats.Report(c.vu, createBucketDuration, metrics.D(time.Since(start)))
	return CreateBucketResponse{Success: true}
@@ -53,13 +53,9 @@ func (s *S3) Exports() modules.Exports {
}

func (s *S3) Connect(endpoint string, params map[string]string) (*Client, error) {
	resolver := aws.EndpointResolverWithOptionsFunc(func(_, _ string, _ ...interface{}) (aws.Endpoint, error) {
		return aws.Endpoint{
			URL: endpoint,
		}, nil
	})

	cfg, err := config.LoadDefaultConfig(s.vu.Context(), config.WithEndpointResolverWithOptions(resolver))
	cfg, err := config.LoadDefaultConfig(s.vu.Context(),
		config.WithBaseEndpoint(endpoint),
		config.WithSharedConfigProfile(params["aws_profile"]))
	if err != nil {
		return nil, fmt.Errorf("configuration error: %w", err)
	}
@@ -5,6 +5,7 @@ import (

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"

@@ -15,7 +16,7 @@ import (

type Client struct {
	vu       modules.VU
	l        layer.Client
	l        *layer.Layer
	ownerID  *user.ID
	resolver layer.BucketResolver
	limiter  local.Limiter
@@ -42,12 +43,14 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
			Error: "engine size limit reached",
		}
	}
	cid, err := c.resolver.Resolve(c.vu.Context(), bucket)
	cid, err := c.resolver.Resolve(c.vu.Context(), v2container.SysAttributeZoneDefault, bucket)
	if err != nil {
		stats.Report(c.vu, objPutFails, 1)
		return PutResponse{Error: err.Error()}
	}

	size := uint64(payload.Size())

	prm := &layer.PutObjectParams{
		BktInfo: &data.BucketInfo{
			Name: bucket,

@@ -57,7 +60,7 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {
		},
		Header: map[string]string{},
		Object: key,
		Size:   uint64(payload.Size()),
		Size:   &size,
		Reader: payload.Reader(),
	}

@@ -69,14 +72,14 @@ func (c *Client) Put(bucket, key string, payload datagen.Payload) PutResponse {

	stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start)))
	stats.Report(c.vu, objPutSuccess, 1)
	stats.ReportDataSent(c.vu, float64(prm.Size))
	stats.Report(c.vu, objPutData, float64(prm.Size))
	stats.ReportDataSent(c.vu, float64(size))
	stats.Report(c.vu, objPutData, float64(size))

	return PutResponse{Success: true}
}

func (c *Client) Get(bucket, key string) GetResponse {
	cid, err := c.resolver.Resolve(c.vu.Context(), bucket)
	cid, err := c.resolver.Resolve(c.vu.Context(), v2container.SysAttributeZoneDefault, bucket)
	if err != nil {
		stats.Report(c.vu, objGetFails, 1)
		return GetResponse{Error: err.Error()}
@@ -7,11 +7,13 @@ import (
	"io"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	layer "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/relations"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
)
@@ -33,61 +35,86 @@ func (*frostfs) CreateContainer(context.Context, layer.PrmContainerCreate) (*lay
	panic(unimplementedMessage("CreateContainer"))
}

func (*frostfs) Container(ctx context.Context, prmContainer layer.PrmContainer) (*container.Container, error) {
func (*frostfs) Container(context.Context, layer.PrmContainer) (*container.Container, error) {
	panic(unimplementedMessage("Container"))
}

func (*frostfs) UserContainers(ctx context.Context, containers layer.PrmUserContainers) ([]cid.ID, error) {
func (*frostfs) UserContainers(context.Context, layer.PrmUserContainers) ([]cid.ID, error) {
	panic(unimplementedMessage("UserContainers"))
}

func (*frostfs) SetContainerEACL(context.Context, eacl.Table, *session.Container) error {
	panic(unimplementedMessage("SetContainerEACL"))
}

func (*frostfs) ContainerEACL(ctx context.Context, containerEACL layer.PrmContainerEACL) (*eacl.Table, error) {
	panic(unimplementedMessage("ContainerEACL"))
}

func (*frostfs) DeleteContainer(context.Context, cid.ID, *session.Container) error {
	panic(unimplementedMessage("DeleteContainer"))
}

func (f *frostfs) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*layer.ObjectPart, error) {
func (f *frostfs) HeadObject(ctx context.Context, prm layer.PrmObjectHead) (*object.Object, error) {
	return f.Get(ctx, prm.Container, prm.Object)
}

func (f *frostfs) GetObject(ctx context.Context, prm layer.PrmObjectGet) (*layer.Object, error) {
	obj, err := f.Get(ctx, prm.Container, prm.Object)
	if err != nil {
		return nil, err
	}
	part := &layer.ObjectPart{}
	if prm.WithHeader {
		part.Head = obj
	}
	if prm.WithPayload {
		part.Payload = io.NopCloser(bytes.NewReader(obj.Payload()))
	}
	return part, nil

	return &layer.Object{
		Header:  *obj,
		Payload: io.NopCloser(bytes.NewReader(obj.Payload())),
	}, nil
}

func (f *frostfs) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (oid.ID, error) {
func (f *frostfs) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (*layer.CreateObjectResult, error) {
	payload, err := io.ReadAll(prm.Payload)
	if err != nil {
		return oid.ID{}, fmt.Errorf("reading payload: %v", err)
		return nil, fmt.Errorf("reading payload: %v", err)
	}
	hdrs := map[string]string{}
	for _, attr := range prm.Attributes {
		hdrs[attr[0]] = attr[1]
	}
	return f.Put(ctx, prm.Container, nil, hdrs, payload)
	objID, err := f.Put(ctx, prm.Container, nil, hdrs, payload)
	if err != nil {
		return nil, err
	}

	return &layer.CreateObjectResult{
		ObjectID:      objID,
		CreationEpoch: 0, // probably we should additionally invoke NetworkInfo
	}, nil
}

func (f *frostfs) DeleteObject(context.Context, layer.PrmObjectDelete) error {
	panic(unimplementedMessage("DeleteObject"))
}

func (f *frostfs) TimeToEpoch(ctx context.Context, now time.Time, future time.Time) (uint64, uint64, error) {
func (f *frostfs) TimeToEpoch(context.Context, time.Time, time.Time) (uint64, uint64, error) {
	panic(unimplementedMessage("TimeToEpoch"))
}

func (f *frostfs) SearchObjects(ctx context.Context, search layer.PrmObjectSearch) ([]oid.ID, error) {
func (f *frostfs) SearchObjects(context.Context, layer.PrmObjectSearch) ([]oid.ID, error) {
	panic(unimplementedMessage("SearchObjects"))
}

func (f *frostfs) AddContainerPolicyChain(context.Context, layer.PrmAddContainerPolicyChain) error {
	panic(unimplementedMessage("AddContainerPolicyChain"))
}

func (f *frostfs) RangeObject(context.Context, layer.PrmObjectRange) (io.ReadCloser, error) {
	panic(unimplementedMessage("RangeObject"))
}

func (f *frostfs) PatchObject(context.Context, layer.PrmObjectPatch) (oid.ID, error) {
	panic(unimplementedMessage("PatchObject"))
}

func (f *frostfs) NetworkInfo(context.Context) (netmap.NetworkInfo, error) {
	panic(unimplementedMessage("NetworkInfo"))
}

func (f *frostfs) Relations() relations.Relations {
	panic(unimplementedMessage("Relations"))
}

func (f *frostfs) NetmapSnapshot(context.Context) (netmap.NetMap, error) {
	panic(unimplementedMessage("NetmapSnapshot"))
}
@@ -1,16 +1,19 @@
package s3local

import (
	"context"
	"flag"
	"fmt"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
	v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/panjf2000/ants/v2"
	"go.k6.io/k6/js/modules"
	"go.k6.io/k6/metrics"
	"go.uber.org/zap"
@@ -147,29 +150,66 @@ func (s *Local) Connect(configFile string, configDir string, params map[string]s
		return nil, fmt.Errorf("creating bucket resolver: %v", err)
	}

	cfg := &layer.Config{
		Cache:       layer.NewCache(layer.DefaultCachesConfigs(zap.L())),
		AnonKey:     layer.AnonymousKey{Key: key},
		Resolver:    resolver,
		TreeService: treeSvc,
	var owner user.ID
	user.IDFromKey(&owner, key.PrivateKey.PublicKey)

	workerPool, err := ants.NewPool(100)
	if err != nil {
		return nil, fmt.Errorf("init worker pool: %w", err)
	}

	l := layer.NewLayer(zap.L(), &frostfs{rc}, cfg)
	err = l.Initialize(s.l.VU().Context(), nopEventListener{})
	anonKey, err := keys.NewPrivateKey()
	if err != nil {
		return nil, fmt.Errorf("initialize: %w", err)
		return nil, fmt.Errorf("create anon key: %w", err)
	}

	cfg := &layer.Config{
		GateOwner:   owner,
		Cache:       layer.NewCache(layer.DefaultCachesConfigs(zap.L())),
		AnonKey:     layer.AnonymousKey{Key: anonKey},
		Resolver:    resolver,
		TreeService: treeSvc,
		Features:    &FeatureSettings{},
		GateKey:     key,
		WorkerPool:  workerPool,
	}

	return &Client{
		vu:       s.l.VU(),
		l:        l,
		l:        layer.NewLayer(zap.L(), &frostfs{rc}, cfg),
		ownerID:  rc.OwnerID(),
		resolver: resolver,
		limiter:  limiter,
	}, nil
}

type nopEventListener struct{}
type FeatureSettings struct {
}

func (nopEventListener) Subscribe(context.Context, string, layer.MsgHandler) error { return nil }
func (nopEventListener) Listen(context.Context)                                    {}
func (k *FeatureSettings) TombstoneLifetime() uint64 {
	return 10
}

func (k *FeatureSettings) TombstoneMembersSize() int {
	return 100
}

func (k *FeatureSettings) BufferMaxSizeForPut() uint64 {
	return 1024 * 1024
}

func (k *FeatureSettings) ClientCut() bool {
	return false
}

func (k *FeatureSettings) MD5Enabled() bool {
	return false
}

func (k *FeatureSettings) FormContainerZone(ns string) string {
	if ns == "" {
		return v2container.SysAttributeZoneDefault
	}

	return ns + ".ns"
}
@@ -24,9 +24,9 @@ func newFixedBucketResolver(bucketMapping map[string]string) (fixedBucketResolve
	return r, nil
}

func (r fixedBucketResolver) Resolve(_ context.Context, bucket string) (cid.ID, error) {
	if cid, resolved := r[bucket]; resolved {
		return cid, nil
func (r fixedBucketResolver) Resolve(_ context.Context, zone, bucket string) (cid.ID, error) {
	if cnrID, resolved := r[zone+"/"+bucket]; resolved {
		return cnrID, nil
	}
	return cid.ID{}, fmt.Errorf("bucket %s is not mapped to any container", bucket)
	return cid.ID{}, fmt.Errorf("zone %s and bucket %s are not mapped to any container", zone, bucket)
}
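The practical consequence of the hunk above is that the bucket-to-container mapping is now keyed by the zone-qualified name. A minimal sketch, not the real type (strings stand in for `cid.ID`, and the mapping value is hypothetical):

```go
package main

import (
	"context"
	"fmt"
)

// resolver illustrates the "zone/bucket" lookup key used above.
type resolver map[string]string

func (r resolver) Resolve(_ context.Context, zone, bucket string) (string, error) {
	if id, ok := r[zone+"/"+bucket]; ok {
		return id, nil
	}
	return "", fmt.Errorf("zone %s and bucket %s are not mapped to any container", zone, bucket)
}

func main() {
	r := resolver{"container.ns/my-bucket": "example-container-id"} // hypothetical mapping
	id, _ := r.Resolve(context.Background(), "container.ns", "my-bucket")
	fmt.Println(id)
}
```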
@@ -38,15 +38,15 @@ func (kv kv) GetValue() []byte { return kv.v }

type nodeResponse struct {
	meta     []tree.Meta
	nodeID   uint64
	parentID uint64
	ts       uint64
	nodeID   []uint64
	parentID []uint64
	ts       []uint64
}

func (r nodeResponse) GetMeta() []tree.Meta { return r.meta }
func (r nodeResponse) GetNodeID() uint64    { return r.nodeID }
func (r nodeResponse) GetParentID() uint64  { return r.parentID }
func (r nodeResponse) GetTimestamp() uint64 { return r.ts }
func (r nodeResponse) GetNodeID() []uint64    { return r.nodeID }
func (r nodeResponse) GetParentID() []uint64  { return r.parentID }
func (r nodeResponse) GetTimestamp() []uint64 { return r.ts }

func (s treeServiceEngineWrapper) GetNodes(ctx context.Context, p *tree.GetNodesParams) ([]tree.NodeResponse, error) {
	nodeIDs, err := s.ng.TreeGetByPath(ctx, p.BktInfo.CID, p.TreeID, pilorama.AttributeFilename, p.Path, p.LatestOnly)
@@ -67,9 +67,9 @@ func (s treeServiceEngineWrapper) GetNodes(ctx context.Context, p *tree.GetNodes
		return nil, err
	}
	resp := nodeResponse{
		parentID: parentID,
		nodeID:   nodeID,
		ts:       m.Time,
		parentID: []uint64{parentID},
		nodeID:   []uint64{nodeID},
		ts:       []uint64{m.Time},
	}
	if p.AllAttrs {
		resp.meta = kvToTreeMeta(m.Items)
@@ -89,9 +89,17 @@ func (s treeServiceEngineWrapper) GetNodes(ctx context.Context, p *tree.GetNodes
	return resps, nil
}

func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) ([]tree.NodeResponse, error) {
func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
	var resps []tree.NodeResponse

	if len(rootID) != 1 {
		return nil, fmt.Errorf("unsupported multiple node ids")
	}

	if sort {
		return nil, fmt.Errorf("unsupported sorted subtree")
	}

	var traverse func(nodeID uint64, curDepth uint32) error
	traverse = func(nodeID uint64, curDepth uint32) error {
		m, parentID, err := s.ng.TreeGetMeta(ctx, bktInfo.CID, treeID, nodeID)
@@ -100,9 +108,9 @@ func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.
		}

		resps = append(resps, nodeResponse{
			nodeID:   nodeID,
			parentID: parentID,
			ts:       m.Time,
			nodeID:   []uint64{nodeID},
			parentID: []uint64{parentID},
			ts:       []uint64{m.Time},
			meta:     kvToTreeMeta(m.Items),
		})
@@ -122,7 +130,7 @@ func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.
		return nil
	}

	if err := traverse(rootID, 0); err != nil {
	if err := traverse(rootID[0], 0); err != nil {
		return nil, fmt.Errorf("traversing: %v", err)
	}
@@ -192,7 +200,7 @@ func (s treeServiceEngineWrapper) RemoveNode(ctx context.Context, bktInfo *data.
}

func (s treeServiceEngineWrapper) GetSubTreeStream(
	ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32,
	ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32,
) (tree.SubTreeStream, error) {
	panic(unimplementedMessage("TreeService.GetSubTreeStream"))
}
@@ -31,8 +31,8 @@ const grpc_endpoint =
const grpc_client = native.connect(
    grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
    __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
    __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true'
                          : false);
    __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
    1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
const log = logging.new().withField('endpoint', grpc_endpoint);

const registry_enabled = !!__ENV.REGISTRY_FILE;
@@ -30,8 +30,8 @@ const grpc_endpoint =
const grpc_client = native.connect(
    grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5,
    __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60,
    __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' :
        false);
    __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
    1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
const log = logging.new().withField('endpoint', grpc_endpoint);

const registry_enabled = !!__ENV.REGISTRY_FILE;
@@ -1,2 +1,2 @@
(()=>{"use strict";var t={n:r=>{var e=r&&r.__esModule?()=>r.default:()=>r;return t.d(e,{a:e}),e},d:(r,e)=>{for(var n in e)t.o(e,n)&&!t.o(r,n)&&Object.defineProperty(r,n,{enumerable:!0,get:e[n]})},o:(t,r)=>Object.prototype.hasOwnProperty.call(t,r),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},r={};t.r(r),t.d(r,{findBetween:()=>x,getCurrentStageIndex:()=>i,normalDistributionStages:()=>m,parseDuration:()=>o,randomIntBetween:()=>d,randomItem:()=>h,randomString:()=>p,tagWithCurrentStageIndex:()=>u,tagWithCurrentStageProfile:()=>s,uuidv4:()=>g});const e=require("k6/execution");var n=t.n(e);function o(t){if(null==t||t.length<1)throw new Error("str is empty");for(var r=0,e="",n={},o=0;o<t.length;o++)if((a(t[o])||"."==t[o])&&(e+=t[o]),null!=t[o+1]&&!a(t[o+1])&&"."!=t[o+1]){var i=parseFloat(e,10),u=t[o+1];switch(u){case"d":r+=24*i*60*60*1e3;break;case"h":r+=60*i*60*1e3;break;case"m":o+2<t.length&&"s"==t[o+2]?(r+=Math.trunc(i),o++,u="ms"):r+=60*i*1e3;break;case"s":r+=1e3*i;break;default:throw new Error("".concat(u," is an unsupported time unit"))}if(n[u])throw new Error("".concat(u," time unit is provided multiple times"));n[u]=!0,o++,e=""}return e.length>0&&(r+=parseFloat(e,10)),r}function a(t){return t>="0"&&t<="9"}function i(){if(null==n()||null==n().test||null==n().test.options)throw new Error("k6/execution.test.options is undefined - getCurrentStageIndex requires a k6 v0.38.0 or later. Please, upgrade for getting k6/execution.test.options supported.");var t=n().test.options.scenarios[n().scenario.name];if(null==t)throw new Error("the exec.test.options object doesn't contain the current scenario ".concat(n().scenario.name));if(null==t.stages)throw new Error("only ramping-vus or ramping-arravial-rate supports stages, it is not possible to get a stage index on other executors.");if(t.stages.length<1)throw new Error("the current scenario ".concat(t.name," doesn't contain any stage"));for(var r=0,e=new Date-n().scenario.startTime,a=0;a<t.stages.length;a++)if(e<(r+=o(t.stages[a].duration)))return a;return t.stages.length-1}function u(){n().vu.tags.stage=i()}function s(){n().vu.tags.stage_profile=function(){var t=i();if(t<1)return"ramp-up";var r=n().test.options.scenarios[n().scenario.name].stages,e=r[t],o=r[t-1];return e.target>o.target?"ramp-up":o.target==e.target?"steady":"ramp-down"}()}const l=require("k6/crypto");function c(t){return function(t){if(Array.isArray(t))return f(t)}(t)||function(t){if("undefined"!=typeof Symbol&&null!=t[Symbol.iterator]||null!=t["@@iterator"])return Array.from(t)}(t)||function(t,r){if(!t)return;if("string"==typeof t)return f(t,r);var e=Object.prototype.toString.call(t).slice(8,-1);"Object"===e&&t.constructor&&(e=t.constructor.name);if("Map"===e||"Set"===e)return Array.from(t);if("Arguments"===e||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(e))return f(t,r)}(t)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function f(t,r){(null==r||r>t.length)&&(r=t.length);for(var e=0,n=new Array(r);e<r;e++)n[e]=t[e];return n}function g(){var t=arguments.length>0&&void 0!==arguments[0]&&arguments[0];return t?y():v()}function d(t,r){return Math.floor(Math.random()*(r-t+1)+t)}function h(t){return t[Math.floor(Math.random()*t.length)]}function p(t){for(var r=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:"abcdefghijklmnopqrstuvwxyz",e="";t--;)e+=r[Math.random()*r.length|0];return e}function x(t,r,e){for(var n,o=arguments.length>3&&void 0!==arguments[3]&&arguments[3],a=[],i=!0,u=0;i&&-1!=(n=t.indexOf(r))&&(n+=r.length,-1!=(u=t.indexOf(e,n)));){var s=t.substring(n,u);if(!o)return s;a.push(s),t=t.substring(u+e.length)}return a.length?a:null}function m(t,r){var e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:10;function n(t,r,e){return Math.exp(-.5*Math.pow((e-t)/r,2))/(r*Math.sqrt(2*Math.PI))}for(var o=0,a=1,i=new Array(e+2).fill(0),u=new Array(e+2).fill(Math.ceil(r/6)),s=[],l=0;l<=e;l++)i[l]=n(o,a,-2*a+4*a*l/e);for(var f=Math.max.apply(Math,c(i)),g=i.map((function(r){return Math.round(r*t/f)})),d=1;d<=e;d++)u[d]=Math.ceil(4*r/(6*e));for(var h=0;h<=e+1;h++)s.push({duration:"".concat(u[h],"s"),target:g[h]});return s}function v(){return"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g,(function(t){var r=16*Math.random()|0;return("x"===t?r:3&r|8).toString(16)}))}function y(){for(var t=[],r=0;r<256;++r)t.push((r+256).toString(16).slice(1));var e=new Uint8Array((0,l.randomBytes)(16));return e[6]=15&e[6]|64,e[8]=63&e[8]|128,(t[e[0]]+t[e[1]]+t[e[2]]+t[e[3]]+"-"+t[e[4]]+t[e[5]]+"-"+t[e[6]]+t[e[7]]+"-"+t[e[8]]+t[e[9]]+"-"+t[e[10]]+t[e[11]]+t[e[12]]+t[e[13]]+t[e[14]]+t[e[15]]).toLowerCase()}var w=exports;for(var b in r)w[b]=r[b];r.__esModule&&Object.defineProperty(w,"__esModule",{value:!0})})();
//# sourceMappingURL=index.js.map
scenarios/libs/keygen.js (new file, 34 lines)

@@ -0,0 +1,34 @@
import { uuidv4 } from './k6-utils-1.4.0.js';

export function generateS3Key() {
    let width = parseInt(__ENV.DIR_WIDTH || '0');
    let height = parseInt(__ENV.DIR_HEIGHT || '0');

    let key = ''
    if (width > 0 && height > 0) {
        for (let index = 0; index < height; index++) {
            const w = Math.floor(Math.random() * width) + 1;
            key = key + 'dir' + w + '/';
        }
    }

    key += objName();
    return key;
}

const asciiLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"

function objName() {
    if (__ENV.OBJ_NAME) {
        return __ENV.OBJ_NAME;
    }
    const length = parseInt(__ENV.OBJ_NAME_LENGTH || '0');
    if (length > 0) {
        let name = "";
        for (let i = 0; i < length; i++) {
            name += asciiLetters.charAt(Math.floor(Math.random() * asciiLetters.length));
        }
        return name;
    }
    return uuidv4();
}
@@ -4,17 +4,18 @@ from helpers.cmd import execute_cmd, log


def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
    configuration = ""
    if location:
        location = f"--create-bucket-configuration 'LocationConstraint={location}'"
        configuration = f"--create-bucket-configuration 'LocationConstraint={location}'"
    if acl:
        acl = f"--acl {acl}"

    bucket_name = str(uuid.uuid4())
    no_verify_ssl_str = "--no-verify-ssl" if no_verify_ssl else ""
    cmd_line = f"aws {no_verify_ssl_str} s3api create-bucket --bucket {bucket_name} " \
               f"--endpoint {endpoint} {location} {acl} "
               f"--endpoint {endpoint} {configuration} {acl} "
    cmd_line_ver = f"aws {no_verify_ssl_str} s3api put-bucket-versioning --bucket {bucket_name} " \
                   f"--versioning-configuration Status=Enabled --endpoint {endpoint} {acl} "
                   f"--versioning-configuration Status=Enabled --endpoint {endpoint}"

    output, success = execute_cmd(cmd_line)

@@ -24,7 +25,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
            f"Error: {output}", endpoint)
        return False

    if versioning == "True":
    if versioning:
        output, success = execute_cmd(cmd_line_ver)
        if not success:
            log(f"{cmd_line_ver}\n"

@@ -33,7 +34,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
    else:
        log(f"Bucket versioning has been applied for bucket {bucket_name}", endpoint)

    log(f"Created bucket: {bucket_name}", endpoint)
    log(f"Created bucket: {bucket_name} ({location})", endpoint)
    return bucket_name
@@ -1,18 +1,16 @@
import re
from helpers.cmd import execute_cmd, log

def create_container(endpoint, policy, wallet_path, config, acl, local=False, depth=0):
    if depth > 20:
def create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local=False, retry=0):
    if retry > int(container_creation_retry):
        raise ValueError(f"unable to create container: too many unsuccessful attempts")

    if wallet_path:
        wallet_file = f"--wallet {wallet_path}"
    if config:
        wallet_config = f"--config {config}"
    if acl:
        acl_param = f"--basic-acl {acl}"
    cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} container create {wallet_file} {wallet_config} " \
               f" --policy '{policy}' {acl_param} --await"
               f" --policy '{policy}' --await"

    output, success = execute_cmd(cmd_line)

@@ -34,7 +32,21 @@ def create_container(endpoint, policy, wallet_path, config, acl, local=False, de
        raise ValueError(f"no CID was parsed from command output:\t{fst_str}")
    cid = splitted[1]

    log(f"Created container {cid}", endpoint)
    log(f"Created container: {cid} ({policy})", endpoint)

    # Add rule for container
    if rules:
        r = ""
        for rule in rules:
            r += f" --rule '{rule}' "
        cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} ape-manager add {wallet_file} {wallet_config} " \
                   f" --chain-id 'chain-id' {r} --target-name '{cid}' --target-type 'container'"
        output, success = execute_cmd(cmd_line)
        if not success:
            log(f"{cmd_line}\n"
                f"Rule has not been added\n"
                f"{output}", endpoint)
            return False

    if not local:
        return cid

@@ -88,7 +100,7 @@ def create_container(endpoint, policy, wallet_path, config, acl, local=False, de
        return cid

    log(f"Created container {cid} is not stored on {endpoint}, creating another one...", endpoint)
    return create_container(endpoint, policy, wallet_path, config, acl, local, depth + 1)
    return create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local, retry + 1)


def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config):
@@ -15,18 +15,21 @@ from helpers.frostfs_cli import create_container, upload_object

ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
MAX_WORKERS = 50
DEFAULT_POLICY = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
DEFAULT_RULES = ["allow Object.* *"]

parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb')
parser.add_argument('--containers', help='Number of containers to create')
parser.add_argument('--retry', default=20, help='Maximum number of retries to create a container')
parser.add_argument('--out', help='JSON file with output')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects')
parser.add_argument('--wallet', help='Wallet file path')
parser.add_argument('--config', help='Wallet config file path')
parser.add_argument(
    "--policy",
    help="Container placement policy",
    default="REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
    help=f"Container placement policy. Default is {DEFAULT_POLICY}",
    action="append"
)
parser.add_argument('--endpoint', help='Nodes addresses separated by comma.')
parser.add_argument('--update', help='Save existed containers')
@@ -35,7 +38,10 @@ parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Def
parser.add_argument('--sleep', help='Time to sleep between containers creation and objects upload (in seconds), '
                              'Default = 8', default=8)
parser.add_argument('--local', help='Create containers that store data on provided endpoints. Warning: additional empty containers may be created.', action='store_true')
parser.add_argument('--acl', help='Container ACL. Default is public-read-write.', default='public-read-write')
parser.add_argument(
    '--rule',
    help='Rule attached to created containers. All entries of CONTAINER_ID will be replaced with id of created container.',
    action="append")

args: Namespace = parser.parse_args()
print(args)

@@ -46,11 +52,17 @@ def main():
    objects_list = []

    endpoints = args.endpoint.split(',')
    if not args.policy:
        args.policy = [DEFAULT_POLICY]

    container_creation_retry = args.retry
    wallet = args.wallet
    wallet_config = args.config
    workers = int(args.workers)
    objects_per_container = int(args.preload_obj)
    rules = args.rule
    if not rules:
        rules = DEFAULT_RULES

    ignore_errors = args.ignore_errors
    if args.update:

@@ -63,9 +75,9 @@ def main():
    containers_count = int(args.containers)
    print(f"Create containers: {containers_count}")
    with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
        containers_runs = [executor.submit(create_container, endpoint, args.policy, wallet, wallet_config, args.acl, args.local)
                           for _, endpoint in
                           zip(range(containers_count), cycle(endpoints))]
        containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, rules, args.local)
                           for _, endpoint, policy in
                           zip(range(containers_count), cycle(endpoints), cycle(args.policy))]

        for run in containers_runs:
            container_id = run.result()
@@ -11,6 +11,12 @@ from concurrent.futures import ProcessPoolExecutor
from helpers.cmd import random_payload
from helpers.aws_cli import create_bucket, upload_object

ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
ERROR_WRONG_PERCENTAGE = 3
MAX_WORKERS = 50
DEFAULT_LOCATION = ""

parser = argparse.ArgumentParser()

parser.add_argument('--size', help='Upload objects size in kb.')

@@ -20,8 +26,9 @@ parser.add_argument('--preload_obj', help='Number of pre-loaded objects.')
parser.add_argument('--endpoint', help='S3 Gateways addresses separated by comma.')
parser.add_argument('--update', help='True/False, False by default. Save existed buckets from target file (--out). '
                              'New buckets will not be created.')
parser.add_argument('--location', help='AWS location. Will be empty, if has not be declared.', default="")
parser.add_argument('--versioning', help='True/False, False by default.')
parser.add_argument('--location', help=f'AWS location constraint. Default is "{DEFAULT_LOCATION}"', action="append")
parser.add_argument('--versioning', help='True/False, False by default. Alias of --buckets_versioned=100')
parser.add_argument('--buckets_versioned', help='Percent of versioned buckets. Default is 0', default=0)
parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)

@@ -32,10 +39,6 @@ parser.add_argument('--acl', help='Bucket ACL. Default is private. Expected valu
args = parser.parse_args()
print(args)

ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
MAX_WORKERS = 50

def main():
    buckets = []
    objects_list = []

@@ -43,6 +46,8 @@ def main():
    no_verify_ssl = args.no_verify_ssl

    endpoints = args.endpoint.split(',')
    if not args.location:
        args.location = [DEFAULT_LOCATION]

    workers = int(args.workers)
    objects_per_bucket = int(args.preload_obj)
@@ -59,9 +64,18 @@ def main():
    print(f"Create buckets: {buckets_count}")

    with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
        buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning, args.location, args.acl, no_verify_ssl)
                        for _, endpoint in
                        zip(range(buckets_count), cycle(endpoints))]
        if not 0 <= int(args.buckets_versioned) <= 100:
            print(f"Percent of versioned buckets must be between 0 and 100: got {args.buckets_versioned}")
            if not ignore_errors:
                sys.exit(ERROR_WRONG_PERCENTAGE)
        if args.versioning == "True":
            versioning_per_bucket = [True] * buckets_count
        else:
            num_versioned_buckets = int((int(args.buckets_versioned) / 100) * buckets_count)
            versioning_per_bucket = [True] * num_versioned_buckets + [False] * (buckets_count - num_versioned_buckets)
        buckets_runs = [executor.submit(create_bucket, endpoint, versioning_per_bucket[i], location, args.acl, no_verify_ssl)
                        for i, endpoint, location in
                        zip(range(buckets_count), cycle(endpoints), cycle(args.location))]

        for run in buckets_runs:
            bucket_name = run.result()
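The split above is Python; as a worked example of the same arithmetic, with `--buckets_versioned 25` and 8 buckets, `25 * 8 / 100 = 2`, so the first two buckets are created with versioning enabled. A hedged Go rendering (hypothetical helper, same logic):

```go
package main

import "fmt"

// versioningFlags reproduces the preset's split: the first
// percent*total/100 buckets are versioned, the rest are not.
func versioningFlags(percent, total int) []bool {
	n := percent * total / 100
	flags := make([]bool, total)
	for i := 0; i < n; i++ {
		flags[i] = true
	}
	return flags
}

func main() {
	fmt.Println(versioningFlags(25, 8)) // [true true false false false false false false]
}
```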
@@ -21,6 +21,7 @@ Scenarios `grpc.js`, `local.js`, `http.js` and `s3.js` support the following opt
* `PAYLOAD_TYPE` - type of an object payload ("random" or "text", default: "random").
* `STREAMING` - if set, the payload is generated on the fly and is not read into memory fully.
* `METRIC_TAGS` - custom metrics tags (format `tag1:value1;tag2:value2`; a parsing sketch follows this list).
* `DATE_FORMAT` - custom datetime format: `timeonly` (default), `datetime` or `none`.
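The `tag1:value1;tag2:value2` format is straightforward to split into pairs; a hedged Go sketch, not the extension's actual parser:

```go
package main

import (
	"fmt"
	"strings"
)

// parseMetricTags splits "tag1:value1;tag2:value2" into a map.
// Illustrative only; the real parsing lives inside the extension.
func parseMetricTags(s string) map[string]string {
	tags := make(map[string]string)
	for _, pair := range strings.Split(s, ";") {
		if k, v, ok := strings.Cut(pair, ":"); ok {
			tags[k] = v
		}
	}
	return tags
}

func main() {
	fmt.Println(parseMetricTags("env:prod;run:42")) // map[env:prod run:42]
}
```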
Additionally, the profiling extension can be enabled to generate CPU and memory profiles which can be inspected with `go tool pprof file.prof`:
```shell
|
|||
$ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 500 --location load-1-4
|
||||
```
|
||||
* '--location' - specify the name of container policy (from policy.json file). It's important to run 'aws configure' each time when the policy file has been changed to pick up the latest policies.
|
||||
|
||||
* '--buckets_versioned' - specify the percentage of versioned buckets from the total number of created buckets. Default is 0
|
||||
3. Execute scenario with options:
|
||||
|
||||
```shell
|
||||
|
@@ -138,6 +139,8 @@ Options (in addition to the common options):
* `DELETE_AGE` - age of an object in seconds before which it cannot be deleted. This parameter can be used to control how many objects we have in the system under load.
* `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations.
* `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
* `OBJ_NAME_LENGTH` - if specified, the object name will be generated with the specified length of ASCII characters.
* `DIR_HEIGHT`, `DIR_WIDTH` - if both are specified, the object name will consist of `DIR_HEIGHT` directories, each of which can have `DIR_WIDTH` subdirectories; for example, with `DIR_HEIGHT = 3, DIR_WIDTH = 100`, object names will be `/dir{1...100}/dir{1...100}/dir{1...100}/{uuid || OBJ_NAME}` (a Go sketch of this layout follows this list).
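The actual implementation is the JavaScript in `scenarios/libs/keygen.js`, shown earlier in this diff; here is a hedged Go rendering of the same layout (the leaf suffix is a stand-in for the uuid or `OBJ_NAME` the real script uses):

```go
package main

import (
	"fmt"
	"math/rand"
)

// generateKey mirrors scenarios/libs/keygen.js: height directory levels,
// each chosen from width variants, then a leaf object name.
func generateKey(height, width int) string {
	key := ""
	for i := 0; i < height; i++ {
		key += fmt.Sprintf("dir%d/", rand.Intn(width)+1)
	}
	return key + fmt.Sprintf("obj-%08x", rand.Uint32())
}

func main() {
	fmt.Println(generateKey(3, 100)) // e.g. dir42/dir7/dir93/obj-1a2b3c4d
}
```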
## S3 Multipart
@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';

import {newGenerator} from './libs/datagen.js';
import {generateS3Key} from './libs/keygen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
import {newGenerator} from './libs/datagen.js';

parseEnv();
@@ -82,7 +82,7 @@ if (registry_enabled && delete_age) {
  obj_to_delete_selector =
      constructor(__ENV.REGISTRY_FILE, 'obj_to_delete',
                  __ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0, {
                    status : 'read',
                    status : 'created',
                    age : delete_age,
                  });
}
@@ -132,6 +132,10 @@ export function setup() {
  const start_timestamp = Date.now()
  console.log(
      `Load started at: ${Date(start_timestamp).toString()}`)

  if (delete_vu_count > 0){
    obj_to_delete_selector.sync.add(delete_vu_count)
  }
}

export function teardown(data) {
@@ -155,16 +159,16 @@ export function obj_write() {
    sleep(__ENV.SLEEP_WRITE);
  }

  const key = __ENV.OBJ_NAME || uuidv4();
  const key = generateS3Key();
  const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

  const payload = generator.genPayload();
  const resp = s3_client.put(bucket, key, payload);
  log.withFields({bucket : bucket, key : key, op : 'WRITE'}).info("INFO");
  if (!resp.success) {
    log.withFields({bucket : bucket, key : key}).error(resp.error);
    return;
  }

  if (obj_registry) {
    obj_registry.addObject('', '', bucket, key, payload.hash());
  }
@@ -181,12 +185,9 @@ export function obj_read() {
    return;
  }
  const resp = s3_client.get(obj.s3_bucket, obj.s3_key)
  log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, op : 'READ'}).info("INFO")
  if (!resp.success) {
    log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, op : 'READ'})
    log.withFields({bucket : obj.s3_bucket, key : obj.s3_key})
        .error(resp.error);
  } else {
    obj_registry.setObjectStatus(obj.id, obj.status, 'read');
  }
  return
}
@@ -207,12 +208,13 @@ export function obj_delete() {
  const obj = obj_to_delete_selector.nextObject();
  if (!obj) {
    if (obj_to_delete_exit_on_null) {
      obj_to_delete_selector.sync.done()
      obj_to_delete_selector.sync.wait()
      exec.test.abort("No more objects to select");
    }
    return;
  }
  obj_registry.setObjectStatus(obj.id, obj.status, 'delete');
  log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, op : 'DELETE'}).info("INFO");

  const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
  if (!resp.success) {
    log.withFields({bucket : obj.s3_bucket, key : obj.s3_key, op : 'DELETE'})
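The `sync.done()` / `sync.wait()` pair above is the JS face of the `Sync sync.WaitGroup` field added to `ObjSelector` earlier in this diff: each delete VU marks itself done when the selector runs dry, then blocks until every other deleter has done the same, so the test is aborted only once all deleters have drained. A minimal Go sketch of that rendezvous (illustrative names):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const deleters = 3
	var rendezvous sync.WaitGroup
	rendezvous.Add(deleters) // setup(): one slot per delete VU

	var all sync.WaitGroup
	all.Add(deleters)
	for i := 0; i < deleters; i++ {
		go func(id int) {
			defer all.Done()
			// ... delete objects until the selector returns nil ...
			rendezvous.Done() // this VU is out of work
			rendezvous.Wait() // block until every other deleter is, too
			fmt.Printf("deleter %d: safe to abort\n", id)
		}(i)
	}
	all.Wait()
}
```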
@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';

import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';

parseEnv();
@@ -177,7 +177,7 @@ export function obj_write() {
    sleep(__ENV.SLEEP_WRITE);
  }

  const key = __ENV.OBJ_NAME || uuidv4();
  const key = generateS3Key();
  const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

  const payload = generator.genPayload();
@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';

import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';

parseEnv();
@@ -71,11 +71,23 @@ if (write_vu_count > 0) {
  };
}

const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
  scenarios.read = {
    executor : 'constant-vus',
    vus : read_vu_count,
    duration : `${duration}s`,
    exec : 'obj_read',
    gracefulStop : '5s',
  };
}

const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
let obj_to_delete_exit_on_null = undefined;
if (registry_enabled && delete_age) {
  obj_to_delete_exit_on_null = write_vu_count == 0;

if (registry_enabled) {
  obj_to_delete_exit_on_null = (write_vu_count == 0) && (read_vu_count == 0)

  let constructor = obj_to_delete_exit_on_null ? registry.getOneshotSelector
                                               : registry.getSelector;

@@ -88,16 +100,7 @@ if (registry_enabled && delete_age) {
      });
}

const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
  scenarios.read = {
    executor : 'constant-vus',
    vus : read_vu_count,
    duration : `${duration}s`,
    exec : 'obj_read',
    gracefulStop : '5s',
  };
}

const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
@@ -156,7 +159,7 @@ export function obj_write() {
    sleep(__ENV.SLEEP_WRITE);
  }

  const key = __ENV.OBJ_NAME || uuidv4();
  const key = generateS3Key();
  const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

  const payload = generator.genPayload();
@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';

import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';

parseEnv();
@@ -101,7 +101,7 @@ export function obj_write_multipart() {
    sleep(__ENV.SLEEP_WRITE);
  }

  const key = __ENV.OBJ_NAME || uuidv4();
  const key = generateS3Key();
  const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

  const payload = generator.genPayload();
@@ -5,6 +5,7 @@ import registry from 'k6/x/frostfs/registry';
import s3local from 'k6/x/frostfs/s3local';
import stats from 'k6/x/frostfs/stats';

import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';

@@ -131,7 +132,7 @@ export function handleSummary(data) {
}

export function obj_write() {
  const key = __ENV.OBJ_NAME || uuidv4();
  const key = generateS3Key();
  const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

  const payload = generator.genPayload();
@@ -44,9 +44,8 @@ if (__ENV.GRPC_ENDPOINTS) {
  grpc_client = native.connect(
      grpcEndpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 0,
      __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 0,
      __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' :
          false,
      '');
      __ENV.PREPARE_LOCALLY ? __ENV.PREPARE_LOCALLY.toLowerCase() === 'true' : false,
      1024 * parseInt(__ENV.MAX_OBJECT_SIZE || '0'));
}

// Connect to random S3 endpoint