Compare commits


No commits in common. "master" and "fix/doc-registry-importer" have entirely different histories.

19 changed files with 44 additions and 191 deletions

.forgejo/CODEOWNERS Normal file
View file

@@ -0,0 +1 @@
+* @TrueCloudLab/storage-core @TrueCloudLab/storage-services

View file

@@ -1,3 +0,0 @@
-.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers @TrueCloudLab/storage-services-committers @TrueCloudLab/storage-services-developers
-.forgejo/.* @potyarkin
-Makefile @potyarkin

View file

@@ -27,8 +27,7 @@ Start by forking the `xk6-frostfs` repository, make changes in a branch and then
 send a pull request. We encourage pull requests to discuss code changes. Here
 are the steps in details:
-### Set up your repository
+### Set up your GitHub Repository
 Fork [xk6-frostfs upstream](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/fork) source
 repository to your own personal repository. Copy the URL of your fork (you will
 need it for the `git clone` command below).
@@ -90,7 +89,7 @@ $ git push origin feature/123-something_awesome
 ```
 ### Create a Pull Request
-Pull requests can be created via git.frostfs.info. Refer to [this
+Pull requests can be created via GitHub. Refer to [this
 document](https://help.github.com/articles/creating-a-pull-request/) for
 detailed steps on how to create a pull request. After a Pull Request gets peer
 reviewed and approved, it will be merged.

View file

@@ -57,7 +57,7 @@ const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0
 ### Methods
 - `putContainer(params)`. The `params` is a dictionary (e.g.
-  `{placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
+  `{acl:'public-read-write',placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
   Returns dictionary with `success`
   boolean flag, `container_id` string, and `error` string.
 - `setBufferSize(size)`. Sets internal buffer size for data upload and
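
For illustration, a minimal k6 script exercising the documented `putContainer` call with the new `acl` key might look like the sketch below. The endpoint and connect arguments are copied from the README example in the hunk header; the error handling is an assumption.

```js
import native from 'k6/x/frostfs/native';

// Endpoint and connect arguments as in the README example above.
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0);

export default function () {
  // `acl` is the newly documented optional key.
  const res = frostfs_cli.putContainer({
    acl: 'public-read-write',
    placement_policy: 'REP 3',
    name: 'container-name',
    name_global_scope: 'false',
  });
  if (!res.success) {
    throw new Error(res.error);
  }
  console.log(`created container ${res.container_id}`);
}
```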

View file

@@ -13,6 +13,7 @@ export const options = {
 export function setup() {
   const params = {
+    acl: 'public-read-write',
     placement_policy: 'REP 3',
     name: 'container-name',
     name_global_scope: 'false'

View file

@@ -13,6 +13,7 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
     cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -264,6 +265,16 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
     container.SetCreationTime(&cnr, time.Now())
     cnr.SetOwner(usr)
 
+    if basicACLStr, ok := params["acl"]; ok {
+        var basicACL acl.Basic
+        err := basicACL.DecodeString(basicACLStr)
+        if err != nil {
+            return c.putCnrErrorResponse(err)
+        }
+        cnr.SetBasicACL(basicACL)
+    }
+
     placementPolicyStr, ok := params["placement_policy"]
     if ok {
         var placementPolicy netmap.PlacementPolicy
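
Judging from this hunk, an `acl` value that `acl.Basic.DecodeString` cannot parse is reported back to the script as a failed response rather than aborting the run. A hedged k6-side sketch of that error path (the exact error text comes from the SDK and is an assumption):

```js
import native from 'k6/x/frostfs/native';

const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0);

export default function () {
  // Deliberately malformed ACL string to show the error path.
  const res = frostfs_cli.putContainer({
    acl: 'not-a-real-acl',
    placement_policy: 'REP 3',
  });
  // Expect success === false and a non-empty `error` describing the parse failure.
  console.log(`success=${res.success}, error=${res.error}`);
}
```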

View file

@@ -118,16 +118,11 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
     tok.SetAuthKey(&key)
     tok.SetExp(exp)
 
-    res, err := cli.NetworkInfo(n.vu.Context(), client.PrmNetworkInfo{})
-    if err != nil {
-        return nil, err
-    }
-
-    prevEpoch := res.Info().CurrentEpoch() - 1
-    tok.SetNbf(prevEpoch)
-    tok.SetIat(prevEpoch)
-
     if prepareLocally && maxObjSize > 0 {
+        res, err := cli.NetworkInfo(n.vu.Context(), client.PrmNetworkInfo{})
+        if err != nil {
+            return nil, err
+        }
         if uint64(maxObjSize) > res.Info().MaxObjectSize() {
             return nil, fmt.Errorf("max object size must be not greater than %d bytes", res.Info().MaxObjectSize())
         }
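
After this hunk, the `NetworkInfo` round-trip happens only when both `prepareLocally` is true and a positive `maxObjSize` is given, and the session token no longer gets `Nbf`/`Iat` pinned to the previous epoch. A sketch of the two call shapes from the k6 side, with argument order taken from the README's connect example (the 4194304-byte value is arbitrary):

```js
import native from 'k6/x/frostfs/native';

// No NetworkInfo request is made: prepareLocally=false, maxObjSize=0.
const cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0);

// Triggers the remaining NetworkInfo check: connect fails if 4194304
// exceeds the network's MaxObjectSize.
const localCli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, true, 4194304);
```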

View file

@@ -142,70 +142,6 @@ func (c *Client) Get(bucket, key string) GetResponse {
     return GetResponse{Success: true}
 }
 
-// DeleteObjectVersion deletes object version with specified versionID.
-// If version argument is empty, deletes all versions and delete-markers of specified object.
-func (c *Client) DeleteObjectVersion(bucket, key, version string) DeleteResponse {
-    var toDelete []types.ObjectIdentifier
-
-    if version != "" {
-        toDelete = append(toDelete, types.ObjectIdentifier{
-            Key:       aws.String(key),
-            VersionId: aws.String(version),
-        })
-    } else {
-        versions, err := c.cli.ListObjectVersions(c.vu.Context(), &s3.ListObjectVersionsInput{
-            Bucket: aws.String(bucket),
-            Prefix: aws.String(key),
-        })
-        if err != nil {
-            stats.Report(c.vu, objDeleteFails, 1)
-            return DeleteResponse{Success: false, Error: err.Error()}
-        }
-        toDelete = filterObjectVersions(versions, key)
-    }
-
-    if len(toDelete) == 0 {
-        return c.Delete(bucket, key)
-    } else {
-        _, err := c.cli.DeleteObjects(c.vu.Context(), &s3.DeleteObjectsInput{
-            Bucket: aws.String(bucket),
-            Delete: &types.Delete{
-                Objects: toDelete,
-                Quiet:   true,
-            },
-        })
-        if err != nil {
-            stats.Report(c.vu, objDeleteFails, 1)
-            return DeleteResponse{Success: false, Error: err.Error()}
-        }
-    }
-    return DeleteResponse{Success: true}
-}
-
-func filterObjectVersions(versions *s3.ListObjectVersionsOutput, key string) []types.ObjectIdentifier {
-    var result []types.ObjectIdentifier
-
-    for _, v := range versions.Versions {
-        if *v.Key == key {
-            result = append(result, types.ObjectIdentifier{
-                Key:       v.Key,
-                VersionId: v.VersionId,
-            })
-        }
-    }
-
-    for _, marker := range versions.DeleteMarkers {
-        if *marker.Key == key {
-            result = append(result, types.ObjectIdentifier{
-                Key:       marker.Key,
-                VersionId: marker.VersionId,
-            })
-        }
-    }
-    return result
-}
-
 func get(
     c *s3.Client,
     bucket string,
@@ -279,26 +215,6 @@ func (c *Client) CreateBucket(bucket string, params map[string]string) CreateBuc
         return CreateBucketResponse{Success: false, Error: err.Error()}
     }
 
-    var versioning bool
-    if strVersioned, ok := params["versioning"]; ok {
-        if versioning, err = strconv.ParseBool(strVersioned); err != nil {
-            stats.Report(c.vu, createBucketFails, 1)
-            return CreateBucketResponse{Success: false, Error: err.Error()}
-        }
-    }
-
-    if versioning {
-        _, err = c.cli.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
-            Bucket: aws.String(bucket),
-            VersioningConfiguration: &types.VersioningConfiguration{
-                Status: types.BucketVersioningStatusEnabled,
-            },
-        })
-        if err != nil {
-            stats.Report(c.vu, createBucketFails, 1)
-            return CreateBucketResponse{Success: false, Error: err.Error()}
-        }
-    }
     stats.Report(c.vu, createBucketSuccess, 1)
     stats.Report(c.vu, createBucketDuration, metrics.D(time.Since(start)))
     return CreateBucketResponse{Success: true}
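
This hunk removes both version-aware deletion and the `versioning` key of `CreateBucket`'s params from the s3 module. For reference, a script that relied on them would have looked roughly like the sketch below; the JS method names assume k6's usual Go-to-JS camelCasing, and the `s3.connect` arguments are an assumption.

```js
import s3 from 'k6/x/frostfs/s3';

const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {});

export default function () {
  // No longer honored: versioning handling was removed from CreateBucket.
  s3_cli.createBucket("cats", { versioning: "true" });

  // Removed entirely: deleted one version, or, with an empty version
  // argument, all versions and delete-markers of the object.
  const res = s3_cli.deleteObjectVersion("cats", "cat.jpg", "");
  if (!res.success) {
    throw new Error(res.error);
  }
}
```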

View file

@@ -1,34 +0,0 @@
-import { uuidv4 } from './k6-utils-1.4.0.js';
-
-export function generateS3Key() {
-  let width = parseInt(__ENV.DIR_WIDTH || '0');
-  let height = parseInt(__ENV.DIR_HEIGHT || '0');
-
-  let key = ''
-  if (width > 0 && height > 0) {
-    for (let index = 0; index < height; index++) {
-      const w = Math.floor(Math.random() * width) + 1;
-      key = key + 'dir' + w + '/';
-    }
-  }
-
-  key += objName();
-  return key;
-}
-
-const asciiLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
-
-function objName() {
-  if (__ENV.OBJ_NAME) {
-    return __ENV.OBJ_NAME;
-  }
-  const length = parseInt(__ENV.OBJ_NAME_LENGTH || '0');
-  if (length > 0) {
-    let name = "";
-    for (let i = 0; i < length; i++) {
-      name += asciiLetters.charAt(Math.floor(Math.random() * asciiLetters.length));
-    }
-    return name;
-  }
-  return uuidv4();
-}

View file

@@ -15,7 +15,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
     cmd_line = f"aws {no_verify_ssl_str} s3api create-bucket --bucket {bucket_name} " \
               f"--endpoint {endpoint} {configuration} {acl} "
     cmd_line_ver = f"aws {no_verify_ssl_str} s3api put-bucket-versioning --bucket {bucket_name} " \
-              f"--versioning-configuration Status=Enabled --endpoint {endpoint}"
+              f"--versioning-configuration Status=Enabled --endpoint {endpoint} {acl} "
 
     output, success = execute_cmd(cmd_line)
@@ -25,7 +25,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
             f"Error: {output}", endpoint)
         return False
 
-    if versioning:
+    if versioning == "True":
        output, success = execute_cmd(cmd_line_ver)
        if not success:
            log(f"{cmd_line_ver}\n"

View file

@@ -1,7 +1,7 @@
 import re
 from helpers.cmd import execute_cmd, log
 
-def create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local=False, retry=0):
+def create_container(endpoint, policy, container_creation_retry, wallet_path, config, acl, local=False, retry=0):
     if retry > int(container_creation_retry):
         raise ValueError(f"unable to create container: too many unsuccessful attempts")
@@ -9,8 +9,10 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, co
     wallet_file = f"--wallet {wallet_path}"
     if config:
         wallet_config = f"--config {config}"
+    if acl:
+        acl_param = f"--basic-acl {acl}"
     cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} container create {wallet_file} {wallet_config} " \
-               f" --policy '{policy}' --await"
+               f" --policy '{policy}' {acl_param} --await"
 
     output, success = execute_cmd(cmd_line)
@@ -34,20 +36,6 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, co
     log(f"Created container: {cid} ({policy})", endpoint)
 
-    # Add rule for container
-    if rules:
-        r = ""
-        for rule in rules:
-            r += f" --rule '{rule}' "
-        cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} ape-manager add {wallet_file} {wallet_config} " \
-                   f" --chain-id 'chain-id' {r} --target-name '{cid}' --target-type 'container'"
-        output, success = execute_cmd(cmd_line)
-        if not success:
-            log(f"{cmd_line}\n"
-                f"Rule has not been added\n"
-                f"{output}", endpoint)
-            return False
-
     if not local:
         return cid
@@ -100,7 +88,7 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, co
         return cid
     log(f"Created container {cid} is not stored on {endpoint}, creating another one...", endpoint)
-    return create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local, retry + 1)
+    return create_container(endpoint, policy, container_creation_retry, wallet_path, config, acl, local, retry + 1)
 
 def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config):

View file

@@ -16,7 +16,6 @@ ERROR_WRONG_CONTAINERS_COUNT = 1
 ERROR_WRONG_OBJECTS_COUNT = 2
 MAX_WORKERS = 50
 DEFAULT_POLICY = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
-DEFAULT_RULES = ["allow Object.* *"]
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--size', help='Upload objects size in kb')
@@ -38,10 +37,7 @@ parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Def
 parser.add_argument('--sleep', help='Time to sleep between containers creation and objects upload (in seconds), '
                     'Default = 8', default=8)
 parser.add_argument('--local', help='Create containers that store data on provided endpoints. Warning: additional empty containers may be created.', action='store_true')
-parser.add_argument(
-    '--rule',
-    help='Rule attached to created containers. All entries of CONTAINER_ID will be replaced with id of created container.',
-    action="append")
+parser.add_argument('--acl', help='Container ACL. Default is public-read-write.', default='public-read-write')
 
 args: Namespace = parser.parse_args()
 print(args)
@@ -60,10 +56,7 @@ def main():
     wallet_config = args.config
     workers = int(args.workers)
     objects_per_container = int(args.preload_obj)
-    rules = args.rule
-    if not rules:
-        rules = DEFAULT_RULES
     ignore_errors = args.ignore_errors
     if args.update:
         # Open file
@@ -75,7 +68,7 @@ def main():
     containers_count = int(args.containers)
     print(f"Create containers: {containers_count}")
     with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
-        containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, rules, args.local)
+        containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, args.acl, args.local)
                            for _, endpoint, policy in
                            zip(range(containers_count), cycle(endpoints), cycle(args.policy))]

View file

@@ -13,7 +13,6 @@ from helpers.aws_cli import create_bucket, upload_object
 ERROR_WRONG_CONTAINERS_COUNT = 1
 ERROR_WRONG_OBJECTS_COUNT = 2
-ERROR_WRONG_PERCENTAGE = 3
 
 MAX_WORKERS = 50
 DEFAULT_LOCATION = ""
@@ -27,8 +26,7 @@ parser.add_argument('--endpoint', help='S3 Gateways addresses separated by comma
 parser.add_argument('--update', help='True/False, False by default. Save existed buckets from target file (--out). '
                     'New buckets will not be created.')
 parser.add_argument('--location', help=f'AWS location constraint. Default is "{DEFAULT_LOCATION}"', action="append")
-parser.add_argument('--versioning', help='True/False, False by default. Alias of --buckets_versioned=100')
-parser.add_argument('--buckets_versioned', help='Percent of versioned buckets. Default is 0', default=0)
+parser.add_argument('--versioning', help='True/False, False by default.')
 parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
 parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
 parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
@@ -64,17 +62,8 @@ def main():
     print(f"Create buckets: {buckets_count}")
 
     with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
-        if not 0 <= int(args.buckets_versioned) <= 100:
-            print(f"Percent of versioned buckets must be between 0 and 100: got {args.buckets_versioned}")
-            if not ignore_errors:
-                sys.exit(ERROR_WRONG_PERCENTAGE)
-        if args.versioning == "True":
-            versioning_per_bucket = [True] * buckets_count
-        else:
-            num_versioned_buckets = int((int(args.buckets_versioned) / 100) * buckets_count)
-            versioning_per_bucket = [True] * num_versioned_buckets + [False] * (buckets_count - num_versioned_buckets)
-        buckets_runs = [executor.submit(create_bucket, endpoint, versioning_per_bucket[i], location, args.acl, no_verify_ssl)
-                        for i, endpoint, location in
+        buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning, location, args.acl, no_verify_ssl)
+                        for _, endpoint, location in
                         zip(range(buckets_count), cycle(endpoints), cycle(args.location))]
 
     for run in buckets_runs:

View file

@@ -125,7 +125,7 @@ The tests will use all pre-created buckets for PUT operations and all pre-create
 $ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 500 --location load-1-4
 ```
 * '--location' - specify the name of container policy (from policy.json file). It's important to run 'aws configure' each time when the policy file has been changed to pick up the latest policies.
-* '--buckets_versioned' - specify the percentage of versioned buckets from the total number of created buckets. Default is 0
 
 3. Execute scenario with options:
 ```shell
@@ -138,8 +138,6 @@ Options (in addition to the common options):
 * `DELETE_AGE` - age of object in seconds before which it can not be deleted. This parameter can be used to control how many objects we have in the system under load.
 * `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations.
 * `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
-* `OBJ_NAME_LENGTH` - if specified, then name of the object will be generated with the specified length of ASCII characters.
-* `DIR_HEIGHT`, `DIR_WIDTH` - if both specified, object name will consist of `DIR_HEIGHT` directories, each of which can have `DIR_WIDTH` subdirectories, for example for `DIR_HEIGHT = 3, DIR_WIDTH = 100`, object names will be `/dir{1...100}/dir{1...100}/dir{1...100}/{uuid || OBJ_NAME}`
 
 ## S3 Multipart
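
With `OBJ_NAME_LENGTH` and `DIR_HEIGHT`/`DIR_WIDTH` gone, `OBJ_NAME` is the only naming knob left; the scenarios fall back to a random UUID. A minimal sketch of the updated naming logic, mirroring the scenario hunks below:

```js
import {uuidv4} from './libs/k6-utils-1.4.0.js';

export function obj_write() {
  // OBJ_NAME pins every write to a single key; otherwise each
  // iteration gets a fresh UUID (keygen.js and its knobs are removed).
  const key = __ENV.OBJ_NAME || uuidv4();
  // ... PUT the generated payload under `key`, as in the scenario diffs below.
}
```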

View file

@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';
 
-import {generateS3Key} from './libs/keygen.js';
+import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
-import {newGenerator} from './libs/datagen.js';
+import {uuidv4} from './libs/k6-utils-1.4.0.js';
 
 parseEnv();
@@ -159,7 +159,7 @@ export function obj_write() {
     sleep(__ENV.SLEEP_WRITE);
   }
 
-  const key = generateS3Key();
+  const key = __ENV.OBJ_NAME || uuidv4();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
   const payload = generator.genPayload();

View file

@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';
 
-import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
+import {uuidv4} from './libs/k6-utils-1.4.0.js';
 
 parseEnv();
@@ -177,7 +177,7 @@ export function obj_write() {
     sleep(__ENV.SLEEP_WRITE);
   }
 
-  const key = generateS3Key();
+  const key = __ENV.OBJ_NAME || uuidv4();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
   const payload = generator.genPayload();

View file

@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';
 
-import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
+import {uuidv4} from './libs/k6-utils-1.4.0.js';
 
 parseEnv();
@@ -159,7 +159,7 @@ export function obj_write() {
     sleep(__ENV.SLEEP_WRITE);
   }
 
-  const key = generateS3Key();
+  const key = __ENV.OBJ_NAME || uuidv4();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
   const payload = generator.genPayload();

View file

@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
 import s3 from 'k6/x/frostfs/s3';
 import stats from 'k6/x/frostfs/stats';
 
-import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
+import {uuidv4} from './libs/k6-utils-1.4.0.js';
 
 parseEnv();
@@ -101,7 +101,7 @@ export function obj_write_multipart() {
     sleep(__ENV.SLEEP_WRITE);
   }
 
-  const key = generateS3Key();
+  const key = __ENV.OBJ_NAME || uuidv4();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
   const payload = generator.genPayload();

View file

@@ -5,7 +5,6 @@ import registry from 'k6/x/frostfs/registry';
 import s3local from 'k6/x/frostfs/s3local';
 import stats from 'k6/x/frostfs/stats';
 
-import {generateS3Key} from './libs/keygen.js';
 import {newGenerator} from './libs/datagen.js';
 import {parseEnv} from './libs/env-parser.js';
 import {textSummary} from './libs/k6-summary-0.0.2.js';
@@ -132,7 +131,7 @@ export function handleSummary(data) {
 }
 
 export function obj_write() {
-  const key = generateS3Key();
+  const key = __ENV.OBJ_NAME || uuidv4();
   const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
   const payload = generator.genPayload();