Compare commits

No commits in common. "master" and "master" have entirely different histories.

25 changed files with 58 additions and 382 deletions

View file

@@ -13,7 +13,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.22'
+          go-version: '1.21'
       - name: Run commit format checker
         uses: https://git.frostfs.info/TrueCloudLab/dco-go@v2

View file

@@ -11,21 +11,20 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23'
+          go-version: '1.22'
           cache: true
-      - name: Install linters
-        run: make lint-install
-
-      - name: Run linters
-        run: make lint
+      - name: golangci-lint
+        uses: https://github.com/golangci/golangci-lint-action@v3
+        with:
+          version: latest
   tests:
     name: Tests
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.22', '1.23' ]
+        go_versions: [ '1.21', '1.22' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3
@@ -48,7 +47,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.22'
+          go-version: '1.21'
           cache: true
       - name: Run tests

View file

@@ -3,8 +3,8 @@
 First, thank you for contributing! We love and encourage pull requests from
 everyone. Please follow the guidelines:
 
-- Check the open [issues](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/issues) and
-  [pull requests](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/pulls) for existing
+- Check the open [issues](https://github.com/TrueCloudLab/xk6-frostfs/issues) and
+  [pull requests](https://github.com/TrueCloudLab/xk6-frostfs/pulls) for existing
   discussions.
 
 - Open an issue first, to discuss a new feature or enhancement.
@@ -27,20 +27,19 @@ Start by forking the `xk6-frostfs` repository, make changes in a branch and then
 send a pull request. We encourage pull requests to discuss code changes. Here
 are the steps in details:
 
-### Set up your repository
-
-Fork [xk6-frostfs upstream](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/fork) source
+### Set up your GitHub Repository
+Fork [xk6-frostfs upstream](https://github.com/TrueCloudLab/xk6-frostfs/fork) source
 repository to your own personal repository. Copy the URL of your fork (you will
 need it for the `git clone` command below).
 
 ```sh
-$ git clone https://git.frostfs.info/TrueCloudLab/xk6-frostfs
+$ git clone https://github.com/TrueCloudLab/xk6-frostfs
 ```
 
 ### Set up git remote as ``upstream``
 ```sh
 $ cd xk6-frostfs
-$ git remote add upstream https://git.frostfs.info/TrueCloudLab/xk6-frostfs
+$ git remote add upstream https://github.com/TrueCloudLab/xk6-frostfs
 $ git fetch upstream
 $ git merge upstream/master
 ...
@@ -90,7 +89,7 @@ $ git push origin feature/123-something_awesome
 ```
 
 ### Create a Pull Request
-Pull requests can be created via git.frostfs.info. Refer to [this
+Pull requests can be created via GitHub. Refer to [this
 document](https://help.github.com/articles/creating-a-pull-request/) for
 detailed steps on how to create a pull request. After a Pull Request gets peer
 reviewed and approved, it will be merged.

View file

@@ -3,15 +3,10 @@
 # Common variables
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.60.3
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
+GO_VERSION ?= 1.19
+LINT_VERSION ?= 1.49.0
 BINDIR = bin
-OUTPUT_LINT_DIR ?= $(abspath $(BINDIR))/linters
-LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
-TMP_DIR := .cache
 
 # Binaries to build
 CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
 BINS = $(addprefix $(BINDIR)/, $(CMDS))
@@ -69,22 +64,7 @@ format:
 # Run linters
 lint:
-	@if [ ! -d "$(LINT_DIR)" ]; then \
-		make lint-install; \
-	fi
-	$(LINT_DIR)/golangci-lint run --timeout=5m
-
-# Install linters
-lint-install:
-	@rm -rf $(OUTPUT_LINT_DIR)
-	@mkdir -p $(OUTPUT_LINT_DIR)
-	@mkdir -p $(TMP_DIR)
-	@rm -rf $(TMP_DIR)/linters
-	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
-	@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
-	@rm -rf $(TMP_DIR)/linters
-	@rmdir $(TMP_DIR) 2>/dev/null || true
-	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
+	@golangci-lint --timeout=5m run
 
 # Run linters in Docker
 docker/lint:

View file

@@ -1,5 +1,5 @@
 <p align="center">
-  <img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
+  <img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
 </p>
 <p align="center">
   <a href="https://go.k6.io/k6">k6</a> extension to test and benchmark FrostFS related protocols.
@@ -57,7 +57,7 @@ const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0
 ### Methods
 - `putContainer(params)`. The `params` is a dictionary (e.g.
-  `{placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
+  `{acl:'public-read-write',placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
   Returns dictionary with `success`
   boolean flag, `container_id` string, and `error` string.
 - `setBufferSize(size)`. Sets internal buffer size for data upload and
@@ -186,25 +186,6 @@ Flags:
   -v, --version   version for registry-exporter
 ```
 
-## Import pregen into registry db
-
-You can import pregenerated json files into registry bolt db. Use `frostfs-xk6-registry import`. Usage examples are in help:
-
-```shell
-$ ./bin/frostfs-xk6-registry import -h
-Import objects into registry from pregenerated files
-
-Usage:
-  xk6-registry import [flags]
-
-Examples:
-xk6-registry import registry.bolt preset.json
-xk6-registry import registry.bolt preset.json another_preset.json
-
-Flags:
-  -h, --help   help for import
-```
-
 # License
 - [GNU General Public License v3.0](LICENSE)
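
The head side re-introduces the `acl` key in the `putContainer` example above (matching the `acl.Basic` handling added to the native client later in this compare). A minimal k6 sketch of that call, reusing the `native.connect` arguments from the README example; the ACL string and container name are illustrative:

```js
import native from 'k6/x/frostfs/native';

// Connect arguments copied from the README example above.
const client = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0);

export default function () {
  const res = client.putContainer({
    acl: 'public-read-write', // head side only; the base side has no handler for this key
    placement_policy: 'REP 3',
    name: 'container-name',
    name_global_scope: 'false',
  });
  if (!res.success) {
    throw new Error(res.error); // `error` is filled when `success` is false
  }
  console.log(`created container ${res.container_id}`);
}
```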

View file

@@ -1,55 +0,0 @@
-package importer
-
-import (
-	"encoding/json"
-	"os"
-
-	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
-)
-
-type PreGenObj struct {
-	Bucket    string `json:"bucket"`
-	Object    string `json:"object"`
-	Container string `json:"container"`
-}
-
-type PreGenerateInfo struct {
-	Buckets    []string    `json:"buckets"`
-	Containers []string    `json:"containers"`
-	Objects    []PreGenObj `json:"objects"`
-	ObjSize    string      `json:"obj_size"`
-}
-
-// ImportJSONPreGen writes objects from pregenerated JSON file
-// to the registry.
-// Note that ImportJSONPreGen does not check if object already
-// exists in the registry so in case of re-entry the registry
-// will have two entities representing the same object.
-func ImportJSONPreGen(o *registry.ObjRegistry, filename string) error {
-	f, err := os.ReadFile(filename)
-	if err != nil {
-		return err
-	}
-
-	var pregenInfo PreGenerateInfo
-	err = json.Unmarshal(f, &pregenInfo)
-	if err != nil {
-		return err
-	}
-
-	// AddObject uses DB.Batch to combine concurrent Batch calls
-	// into a single Bolt transaction. DB.Batch is limited by
-	// DB.MaxBatchDelay which may affect performance.
-	for _, obj := range pregenInfo.Objects {
-		if obj.Bucket != "" {
-			err = o.AddObject("", "", obj.Bucket, obj.Object, "")
-		} else {
-			err = o.AddObject(obj.Container, obj.Object, "", "", "")
-		}
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
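
The file format this importer consumed is fixed by the JSON tags on `PreGenObj` and `PreGenerateInfo` above. A sketch with illustrative values; note an object carries either a `bucket` or a `container`, matching the branch in the import loop:

```json
{
  "buckets": ["example-bucket"],
  "containers": ["ExampleContainerIDBase58"],
  "obj_size": "1024",
  "objects": [
    {"bucket": "example-bucket", "object": "obj-0001", "container": ""},
    {"bucket": "", "object": "obj-0002", "container": "ExampleContainerIDBase58"}
  ]
}
```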

View file

@@ -1,27 +0,0 @@
-package importer
-
-import (
-	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
-
-	"github.com/spf13/cobra"
-)
-
-// Cmd represents the import command.
-var Cmd = &cobra.Command{
-	Use:   "import",
-	Short: "Import objects into registry",
-	Long:  "Import objects into registry from pregenerated files",
-	Example: `xk6-registry import registry.bolt preset.json
-xk6-registry import registry.bolt preset.json another_preset.json`,
-	RunE: runCmd,
-	Args: cobra.MinimumNArgs(2),
-}
-
-func runCmd(cmd *cobra.Command, args []string) error {
-	objRegistry := registry.NewObjRegistry(cmd.Context(), args[0])
-	for i := 1; i < len(args); i++ {
-		if err := ImportJSONPreGen(objRegistry, args[i]); err != nil {
-			return err
-		}
-	}
-	return nil
-}

View file

@@ -1,18 +0,0 @@
-package main
-
-import (
-	"context"
-	"os"
-	"os/signal"
-	"syscall"
-)
-
-func main() {
-	ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
-	if cmd, err := rootCmd.ExecuteContextC(ctx); err != nil {
-		cmd.PrintErrln("Error:", err.Error())
-		cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
-		os.Exit(1)
-	}
-}

View file

@@ -1,33 +0,0 @@
-package main
-
-import (
-	"runtime"
-
-	"git.frostfs.info/TrueCloudLab/xk6-frostfs/cmd/xk6-registry/importer"
-	"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/version"
-	"github.com/spf13/cobra"
-)
-
-var rootCmd = &cobra.Command{
-	Use:     "xk6-registry",
-	Version: version.Version,
-	Short:   "Command Line Tool to work with Registry",
-	Long: `Registry provides tools to work with object registry for xk6.
-It contains command for importing objects in registry from preset`,
-	SilenceErrors: true,
-	SilenceUsage:  true,
-	Run:           rootCmdRun,
-}
-
-func init() {
-	cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
-	rootCmd.SetVersionTemplate(`FrostFS xk6-registry
-{{printf "Version: %s" .Version }}
-GoVersion: {{ runtimeVersion }}
-`)
-	rootCmd.AddCommand(importer.Cmd)
-}
-
-func rootCmdRun(cmd *cobra.Command, _ []string) {
-	_ = cmd.Usage()
-}

View file

@@ -13,6 +13,7 @@ export const options = {
 export function setup() {
   const params = {
+    acl: 'public-read-write',
     placement_policy: 'REP 3',
     name: 'container-name',
    name_global_scope: 'false'

go.mod
View file

@@ -1,6 +1,6 @@
 module git.frostfs.info/TrueCloudLab/xk6-frostfs
 
-go 1.22
+go 1.21
 
 require (
 	git.frostfs.info/TrueCloudLab/frostfs-node v0.38.3-0.20240502170333-ec2873caa7c6

View file

@@ -13,6 +13,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -264,6 +265,16 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
 	container.SetCreationTime(&cnr, time.Now())
 	cnr.SetOwner(usr)
 
+	if basicACLStr, ok := params["acl"]; ok {
+		var basicACL acl.Basic
+		err := basicACL.DecodeString(basicACLStr)
+		if err != nil {
+			return c.putCnrErrorResponse(err)
+		}
+		cnr.SetBasicACL(basicACL)
+	}
+
 	placementPolicyStr, ok := params["placement_policy"]
 	if ok {
 		var placementPolicy netmap.PlacementPolicy

View file

@@ -3,7 +3,6 @@ package registry
 import (
 	"context"
 	"fmt"
-	"sync"
 	"time"
 
 	"github.com/nspcc-dev/neo-go/pkg/io"
@@ -24,8 +23,6 @@
 	filter    *ObjFilter
 	cacheSize int
 	kind      SelectorKind
-	// Sync synchronizes VU used for deletion.
-	Sync sync.WaitGroup
 }
 
 // objectSelectCache is the default maximum size of a batch to select from DB.

View file

@@ -142,70 +142,6 @@ func (c *Client) Get(bucket, key string) GetResponse {
 	return GetResponse{Success: true}
 }
 
-// DeleteObjectVersion deletes object version with specified versionID.
-// If version argument is empty, deletes all versions and delete-markers of specified object.
-func (c *Client) DeleteObjectVersion(bucket, key, version string) DeleteResponse {
-	var toDelete []types.ObjectIdentifier
-
-	if version != "" {
-		toDelete = append(toDelete, types.ObjectIdentifier{
-			Key:       aws.String(key),
-			VersionId: aws.String(version),
-		})
-	} else {
-		versions, err := c.cli.ListObjectVersions(c.vu.Context(), &s3.ListObjectVersionsInput{
-			Bucket: aws.String(bucket),
-			Prefix: aws.String(key),
-		})
-		if err != nil {
-			stats.Report(c.vu, objDeleteFails, 1)
-			return DeleteResponse{Success: false, Error: err.Error()}
-		}
-		toDelete = filterObjectVersions(versions, key)
-	}
-	if len(toDelete) == 0 {
-		return c.Delete(bucket, key)
-	} else {
-		_, err := c.cli.DeleteObjects(c.vu.Context(), &s3.DeleteObjectsInput{
-			Bucket: aws.String(bucket),
-			Delete: &types.Delete{
-				Objects: toDelete,
-				Quiet:   true,
-			},
-		})
-		if err != nil {
-			stats.Report(c.vu, objDeleteFails, 1)
-			return DeleteResponse{Success: false, Error: err.Error()}
-		}
-	}
-	return DeleteResponse{Success: true}
-}
-
-func filterObjectVersions(versions *s3.ListObjectVersionsOutput, key string) []types.ObjectIdentifier {
-	var result []types.ObjectIdentifier
-
-	for _, v := range versions.Versions {
-		if *v.Key == key {
-			result = append(result, types.ObjectIdentifier{
-				Key:       v.Key,
-				VersionId: v.VersionId,
-			})
-		}
-	}
-
-	for _, marker := range versions.DeleteMarkers {
-		if *marker.Key == key {
-			result = append(result, types.ObjectIdentifier{
-				Key:       marker.Key,
-				VersionId: marker.VersionId,
-			})
-		}
-	}
-
-	return result
-}
-
 func get(
 	c *s3.Client,
 	bucket string,
@@ -279,26 +215,6 @@ func (c *Client) CreateBucket(bucket string, params map[string]string) CreateBucketResponse {
 		return CreateBucketResponse{Success: false, Error: err.Error()}
 	}
 
-	var versioning bool
-	if strVersioned, ok := params["versioning"]; ok {
-		if versioning, err = strconv.ParseBool(strVersioned); err != nil {
-			stats.Report(c.vu, createBucketFails, 1)
-			return CreateBucketResponse{Success: false, Error: err.Error()}
-		}
-	}
-
-	if versioning {
-		_, err = c.cli.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
-			Bucket: aws.String(bucket),
-			VersioningConfiguration: &types.VersioningConfiguration{
-				Status: types.BucketVersioningStatusEnabled,
-			},
-		})
-		if err != nil {
-			stats.Report(c.vu, createBucketFails, 1)
-			return CreateBucketResponse{Success: false, Error: err.Error()}
-		}
-	}
-
 	stats.Report(c.vu, createBucketSuccess, 1)
 	stats.Report(c.vu, createBucketDuration, metrics.D(time.Since(start)))
 	return CreateBucketResponse{Success: true}
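
Both removed features surface in k6 scenarios through method mapping (Go `CreateBucket`/`DeleteObjectVersion` become `createBucket`/`deleteObjectVersion` in JS, per k6's usual Go-to-JS field-name mapping). A sketch of the base-side usage; the endpoint is hypothetical and the `connect` arguments may differ between revisions:

```js
import s3 from 'k6/x/frostfs/s3';

// Hypothetical gateway address; see the scenarios for real connect() usage.
const client = s3.connect("https://s3.frostfs.devenv:8080");

export default function () {
  // Base side: CreateBucket parses params["versioning"] with strconv.ParseBool
  // and, when true, issues PutBucketVersioning after the bucket is created.
  const created = client.createBucket("cats", { versioning: 'true' });
  if (!created.success) throw new Error(created.error);

  // Base side: an empty version string removes all versions and
  // delete-markers of the key, per the doc comment above.
  const del = client.deleteObjectVersion("cats", "fluffy.jpg", "");
  if (!del.success) throw new Error(del.error);
}
```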

View file

@@ -1,34 +0,0 @@
-import { uuidv4 } from './k6-utils-1.4.0.js';
-
-export function generateS3Key() {
-  let width = parseInt(__ENV.DIR_WIDTH || '0');
-  let height = parseInt(__ENV.DIR_HEIGHT || '0');
-  let key = ''
-  if (width > 0 && height > 0) {
-    for (let index = 0; index < height; index++) {
-      const w = Math.floor(Math.random() * width) + 1;
-      key = key + 'dir' + w + '/';
-    }
-  }
-  key += objName();
-  return key;
-}
-
-const asciiLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
-
-function objName() {
-  if (__ENV.OBJ_NAME) {
-    return __ENV.OBJ_NAME;
-  }
-  const length = parseInt(__ENV.OBJ_NAME_LENGTH || '0');
-  if (length > 0) {
-    let name = "";
-    for (let i = 0; i < length; i++) {
-      name += asciiLetters.charAt(Math.floor(Math.random() * asciiLetters.length));
-    }
-    return name;
-  }
-  return uuidv4();
-}
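
The removed helper is driven entirely by environment variables; the key shapes it produces follow directly from the code above (random parts are illustrative):

```js
import {generateS3Key} from './libs/keygen.js';

// No env vars:              "9b2459e4-6b3c-4f4c-a1b2-0f6f7e2a9c11"  (uuidv4)
// OBJ_NAME=fixed.bin:       "fixed.bin"                             (always)
// OBJ_NAME_LENGTH=8:        "aZ3kQ9xB"                              (random ASCII)
// DIR_HEIGHT=2 DIR_WIDTH=3: "dir2/dir1/<one of the names above>"
export default function () {
  console.log(generateS3Key());
}
```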

View file

@@ -15,7 +15,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
     cmd_line = f"aws {no_verify_ssl_str} s3api create-bucket --bucket {bucket_name} " \
                f"--endpoint {endpoint} {configuration} {acl} "
     cmd_line_ver = f"aws {no_verify_ssl_str} s3api put-bucket-versioning --bucket {bucket_name} " \
-               f"--versioning-configuration Status=Enabled --endpoint {endpoint}"
+               f"--versioning-configuration Status=Enabled --endpoint {endpoint} {acl} "
 
     output, success = execute_cmd(cmd_line)
@@ -25,7 +25,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
           f"Error: {output}", endpoint)
         return False
 
-    if versioning:
+    if versioning == "True":
         output, success = execute_cmd(cmd_line_ver)
         if not success:
             log(f"{cmd_line_ver}\n"

View file

@@ -1,16 +1,18 @@
 import re
 
 from helpers.cmd import execute_cmd, log
 
-def create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local=False, retry=0):
-    if retry > int(container_creation_retry):
+def create_container(endpoint, policy, wallet_path, config, acl, local=False, depth=0):
+    if depth > 20:
         raise ValueError(f"unable to create container: too many unsuccessful attempts")
 
     if wallet_path:
         wallet_file = f"--wallet {wallet_path}"
     if config:
         wallet_config = f"--config {config}"
+    if acl:
+        acl_param = f"--basic-acl {acl}"
     cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} container create {wallet_file} {wallet_config} " \
-               f" --policy '{policy}' --await"
+               f" --policy '{policy}' {acl_param} --await"
 
     output, success = execute_cmd(cmd_line)
@@ -34,20 +36,6 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local=False, retry=0):
     log(f"Created container: {cid} ({policy})", endpoint)
 
-    # Add rule for container
-    if rules:
-        r = ""
-        for rule in rules:
-            r += f" --rule '{rule}' "
-        cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} ape-manager add {wallet_file} {wallet_config} " \
-                   f" --chain-id 'chain-id' {r} --target-name '{cid}' --target-type 'container'"
-        output, success = execute_cmd(cmd_line)
-        if not success:
-            log(f"{cmd_line}\n"
-                f"Rule has not been added\n"
-                f"{output}", endpoint)
-            return False
-
     if not local:
         return cid
@@ -100,7 +88,7 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local=False, retry=0):
         return cid
 
     log(f"Created container {cid} is not stored on {endpoint}, creating another one...", endpoint)
-    return create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local, retry + 1)
+    return create_container(endpoint, policy, wallet_path, config, acl, local, depth + 1)
 
 def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config):

View file

@@ -16,12 +16,10 @@ ERROR_WRONG_CONTAINERS_COUNT = 1
 ERROR_WRONG_OBJECTS_COUNT = 2
 MAX_WORKERS = 50
 DEFAULT_POLICY = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
-DEFAULT_RULES = ["allow Object.* *"]
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--size', help='Upload objects size in kb')
 parser.add_argument('--containers', help='Number of containers to create')
-parser.add_argument('--retry', default=20, help='Maximum number of retries to create a container')
 parser.add_argument('--out', help='JSON file with output')
 parser.add_argument('--preload_obj', help='Number of pre-loaded objects')
 parser.add_argument('--wallet', help='Wallet file path')
@@ -38,10 +36,7 @@ parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
 parser.add_argument('--sleep', help='Time to sleep between containers creation and objects upload (in seconds), '
                                     'Default = 8', default=8)
 parser.add_argument('--local', help='Create containers that store data on provided endpoints. Warning: additional empty containers may be created.', action='store_true')
-parser.add_argument(
-    '--rule',
-    help='Rule attached to created containers. All entries of CONTAINER_ID will be replaced with id of created container.',
-    action="append")
+parser.add_argument('--acl', help='Container ACL. Default is public-read-write.', default='public-read-write')
 
 args: Namespace = parser.parse_args()
 print(args)
@@ -55,14 +50,10 @@ def main():
     if not args.policy:
         args.policy = [DEFAULT_POLICY]
 
-    container_creation_retry = args.retry
     wallet = args.wallet
     wallet_config = args.config
     workers = int(args.workers)
     objects_per_container = int(args.preload_obj)
-    rules = args.rule
-    if not rules:
-        rules = DEFAULT_RULES
 
     ignore_errors = args.ignore_errors
     if args.update:
@@ -75,7 +66,7 @@ def main():
     containers_count = int(args.containers)
     print(f"Create containers: {containers_count}")
 
     with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
-        containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, rules, args.local)
+        containers_runs = [executor.submit(create_container, endpoint, policy, wallet, wallet_config, args.acl, args.local)
                            for _, endpoint, policy in
                            zip(range(containers_count), cycle(endpoints), cycle(args.policy))]

View file

@@ -13,7 +13,6 @@ from helpers.aws_cli import create_bucket, upload_object
 ERROR_WRONG_CONTAINERS_COUNT = 1
 ERROR_WRONG_OBJECTS_COUNT = 2
-ERROR_WRONG_PERCENTAGE = 3
 
 MAX_WORKERS = 50
 DEFAULT_LOCATION = ""
@@ -27,8 +26,7 @@ parser.add_argument('--endpoint', help='S3 Gateways addresses separated by comma')
 parser.add_argument('--update', help='True/False, False by default. Save existed buckets from target file (--out). '
                                      'New buckets will not be created.')
 parser.add_argument('--location', help=f'AWS location constraint. Default is "{DEFAULT_LOCATION}"', action="append")
-parser.add_argument('--versioning', help='True/False, False by default. Alias of --buckets_versioned=100')
-parser.add_argument('--buckets_versioned', help='Percent of versioned buckets. Default is 0', default=0)
+parser.add_argument('--versioning', help='True/False, False by default.')
 parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
 parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
 parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
@@ -64,17 +62,8 @@ def main():
     print(f"Create buckets: {buckets_count}")
 
     with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
-        if not 0 <= int(args.buckets_versioned) <= 100:
-            print(f"Percent of versioned buckets must be between 0 and 100: got {args.buckets_versioned}")
-            if not ignore_errors:
-                sys.exit(ERROR_WRONG_PERCENTAGE)
-        if args.versioning == "True":
-            versioning_per_bucket = [True] * buckets_count
-        else:
-            num_versioned_buckets = int((int(args.buckets_versioned) / 100) * buckets_count)
-            versioning_per_bucket = [True] * num_versioned_buckets + [False] * (buckets_count - num_versioned_buckets)
-        buckets_runs = [executor.submit(create_bucket, endpoint, versioning_per_bucket[i], location, args.acl, no_verify_ssl)
-                        for i, endpoint, location in
+        buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning, location, args.acl, no_verify_ssl)
+                        for _, endpoint, location in
                         zip(range(buckets_count), cycle(endpoints), cycle(args.location))]
 
     for run in buckets_runs:

View file

@@ -125,7 +125,7 @@ The tests will use all pre-created buckets for PUT operations and all pre-create
 $ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 500 --location load-1-4
 ```
 * '--location' - specify the name of container policy (from policy.json file). It's important to run 'aws configure' each time when the policy file has been changed to pick up the latest policies.
-* '--buckets_versioned' - specify the percentage of versioned buckets from the total number of created buckets. Default is 0
 
 3. Execute scenario with options:
 
 ```shell
@@ -138,8 +138,6 @@ Options (in addition to the common options):
 * `DELETE_AGE` - age of object in seconds before which it can not be deleted. This parameter can be used to control how many objects we have in the system under load.
 * `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations.
 * `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
-* `OBJ_NAME_LENGTH` - if specified, then name of the object will be generated with the specified length of ASCII characters.
-* `DIR_HEIGHT`, `DIR_WIDTH` - if both specified, object name will consist of `DIR_HEIGHT` directories, each of which can have `DIR_WIDTH` subdirectories, for example for `DIR_HEIGHT = 3, DIR_WIDTH = 100`, object names will be `/dir{1...100}/dir{1...100}/dir{1...100}/{uuid || OBJ_NAME}`
 
 ## S3 Multipart

View file

@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';
 
-import {generateS3Key} from './libs/keygen.js';
+import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
-import {newGenerator} from './libs/datagen.js';
+import {uuidv4} from './libs/k6-utils-1.4.0.js';
 
 parseEnv();
@@ -132,10 +132,6 @@ export function setup() {
   const start_timestamp = Date.now()
   console.log(
       `Load started at: ${Date(start_timestamp).toString()}`)
-
-  if (delete_vu_count > 0){
-    obj_to_delete_selector.sync.add(delete_vu_count)
-  }
 }
 
 export function teardown(data) {
@@ -159,7 +155,7 @@ export function obj_write() {
     sleep(__ENV.SLEEP_WRITE);
   }
 
-  const key = generateS3Key();
+  const key = __ENV.OBJ_NAME || uuidv4();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
 
   const payload = generator.genPayload();
@@ -208,8 +204,6 @@ export function obj_delete() {
   const obj = obj_to_delete_selector.nextObject();
   if (!obj) {
     if (obj_to_delete_exit_on_null) {
-      obj_to_delete_selector.sync.done()
-      obj_to_delete_selector.sync.wait()
       exec.test.abort("No more objects to select");
    }
    return;
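
Together with the `Sync sync.WaitGroup` field removed from `ObjSelector` earlier in this compare, the base-side hunks above form one coordination pattern; reassembled as a sketch from those hunks:

```js
// setup(): register every delete VU up front (base side only).
if (delete_vu_count > 0) {
  obj_to_delete_selector.sync.add(delete_vu_count);
}

// obj_delete(): when the selector runs dry, each VU marks itself done and
// waits for the rest, so the test aborts only after all delete VUs drain.
obj_to_delete_selector.sync.done();
obj_to_delete_selector.sync.wait();
exec.test.abort("No more objects to select");
```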

View file

@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';
 
-import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
+import {uuidv4} from './libs/k6-utils-1.4.0.js';
 
 parseEnv();
@@ -177,7 +177,7 @@ export function obj_write() {
     sleep(__ENV.SLEEP_WRITE);
   }
 
-  const key = generateS3Key();
+  const key = __ENV.OBJ_NAME || uuidv4();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
 
   const payload = generator.genPayload();

View file

@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';
 
-import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
+import {uuidv4} from './libs/k6-utils-1.4.0.js';
 
 parseEnv();
@@ -159,7 +159,7 @@ export function obj_write() {
     sleep(__ENV.SLEEP_WRITE);
  }
 
-  const key = generateS3Key();
+  const key = __ENV.OBJ_NAME || uuidv4();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
 
   const payload = generator.genPayload();

View file

@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';
 
-import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
+import {uuidv4} from './libs/k6-utils-1.4.0.js';
 
 parseEnv();
@@ -101,7 +101,7 @@ export function obj_write_multipart() {
     sleep(__ENV.SLEEP_WRITE);
   }
 
-  const key = generateS3Key();
+  const key = __ENV.OBJ_NAME || uuidv4();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
 
   const payload = generator.genPayload();

View file

@@ -5,7 +5,6 @@ import registry from 'k6/x/frostfs/registry';
 import s3local from 'k6/x/frostfs/s3local';
 import stats from 'k6/x/frostfs/stats';
 
-import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
@@ -132,7 +131,7 @@ export function handleSummary(data) {
 }
 
 export function obj_write() {
-  const key = generateS3Key();
+  const key = __ENV.OBJ_NAME || uuidv4();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
 
   const payload = generator.genPayload();