forked from TrueCloudLab/xk6-frostfs

[#81] remove schema from preset_s3 and k6 load s3 scenarios

This commit is contained in:
parent ff6814e15d
commit 6182d47b43

7 changed files with 29 additions and 22 deletions
````diff
@@ -99,13 +99,13 @@ Credentials are taken from default AWS configuration files and ENVs.
 
 ```js
 import s3 from 'k6/x/frostfs/s3';
-const s3_cli = s3.connect("http://s3.frostfs.devenv:8080")
+const s3_cli = s3.connect("https://s3.frostfs.devenv:8080")
 ```
 
 You can also provide additional options:
 
 ```js
 import s3 from 'k6/x/frostfs/s3';
-const s3_cli = s3.connect("http://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s'})
+const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s'})
 ```
 
 * `no_verify_ssl` - Bool. If `true` - skip verifying the s3 certificate chain and host name (useful if s3 uses self-signed certificates)
````
```diff
@@ -3,15 +3,15 @@ import uuid
 from helpers.cmd import execute_cmd
 
 
-def create_bucket(endpoint, versioning, location):
+def create_bucket(endpoint, versioning, location, no_verify_ssl):
     if location:
         location = f"--create-bucket-configuration 'LocationConstraint={location}'"
     bucket_name = str(uuid.uuid4())
-    cmd_line = f"aws --no-verify-ssl s3api create-bucket --bucket {bucket_name} " \
-               f"--endpoint http://{endpoint} {location}"
-    cmd_line_ver = f"aws --no-verify-ssl s3api put-bucket-versioning --bucket {bucket_name} " \
-               f"--versioning-configuration Status=Enabled --endpoint http://{endpoint} "
+    no_verify_ssl_str = "--no-verify-ssl" if no_verify_ssl else ""
+    cmd_line = f"aws {no_verify_ssl_str} s3api create-bucket --bucket {bucket_name} " \
+               f"--endpoint {endpoint} {location}"
+    cmd_line_ver = f"aws {no_verify_ssl_str} s3api put-bucket-versioning --bucket {bucket_name} " \
+               f"--versioning-configuration Status=Enabled --endpoint {endpoint} "
 
     out, success = execute_cmd(cmd_line)
 
```
```diff
@@ -32,11 +32,11 @@ def create_bucket(endpoint, versioning, location):
     return bucket_name
 
 
-def upload_object(bucket, payload_filepath, endpoint):
+def upload_object(bucket, payload_filepath, endpoint, no_verify_ssl):
     object_name = str(uuid.uuid4())
-    cmd_line = f"aws --no-verify-ssl s3api put-object --bucket {bucket} --key {object_name} " \
-               f"--body {payload_filepath} --endpoint http://{endpoint}"
+    no_verify_ssl_str = "--no-verify-ssl" if no_verify_ssl else ""
+    cmd_line = f"aws {no_verify_ssl_str} s3api put-object --bucket {bucket} --key {object_name} " \
+               f"--body {payload_filepath} --endpoint {endpoint}"
     out, success = execute_cmd(cmd_line)
 
     if not success:
```
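
These helpers no longer hardcode `--no-verify-ssl` or prepend `http://`: the caller passes a scheme-qualified endpoint plus a boolean. A minimal sketch of the new calling convention (the import path and endpoint are illustrative assumptions, not taken from this diff):

```python
from helpers.aws_cli import create_bucket, upload_object  # assumed module path

endpoint = "https://s3.frostfs.devenv:8080"  # scheme now travels with the endpoint

# no_verify_ssl=True expands to "--no-verify-ssl" on the aws CLI invocation;
# with False the flag is omitted and the certificate chain is verified.
bucket = create_bucket(endpoint, versioning="True", location="", no_verify_ssl=True)
upload_object(bucket, "/tmp/payload.bin", endpoint, no_verify_ssl=True)
```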
```diff
@@ -10,7 +10,6 @@ import time
 
 from argparse import Namespace
 from concurrent.futures import ProcessPoolExecutor
-
 from helpers.cmd import random_payload
 from helpers.frostfs_cli import create_container, upload_object
 
```
```diff
@@ -32,7 +31,7 @@ parser.add_argument(
 )
 parser.add_argument('--endpoint', help='Nodes addresses separated by comma.')
 parser.add_argument('--update', help='Save existed containers')
-parser.add_argument('--ignore-errors', help='Ignore preset errors')
+parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
 parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
 parser.add_argument('--sleep', help='Time to sleep between container creation and object PUT (in seconds), '
                                     'Default = 8', default=8)
```
```diff
@@ -52,7 +51,7 @@ def main():
     workers = int(args.workers)
     objects_per_container = int(args.preload_obj)
 
-    ignore_errors = True if args.ignore_errors else False
+    ignore_errors = args.ignore_errors
     if args.update:
         # Open file
         with open(args.out) as f:
```
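
The `action='store_true'` change above is what makes this simplification safe: without an action, argparse expects `--ignore-errors` to take a value and stores a string (or `None`), hence the old `True if args.ignore_errors else False`. A self-contained sketch of the difference:

```python
import argparse

parser = argparse.ArgumentParser()
# store_true makes the flag value-less and always yields a real bool.
parser.add_argument('--ignore-errors', action='store_true')

print(parser.parse_args([]).ignore_errors)                   # False
print(parser.parse_args(['--ignore-errors']).ignore_errors)  # True
```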
```diff
@@ -22,7 +22,8 @@ parser.add_argument('--update', help='True/False, False by default. Save existed
                                      'New buckets will not be created.')
 parser.add_argument('--location', help='AWS location. Will be empty, if has not be declared.', default="")
 parser.add_argument('--versioning', help='True/False, False by default.')
-parser.add_argument('--ignore-errors', help='Ignore preset errors')
+parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
+parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
 parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
 parser.add_argument('--sleep', help='Time to sleep between container creation and object PUT (in seconds), '
                                     'Default = 8', default=8)
```
```diff
@@ -37,7 +38,8 @@ MAX_WORKERS = 50
 def main():
     buckets = []
     objects_list = []
-    ignore_errors = True if args.ignore_errors else False
+    ignore_errors = args.ignore_errors
+    no_verify_ssl = args.no_verify_ssl
 
     endpoints = args.endpoint.split(',')
 
```
```diff
@@ -56,7 +58,7 @@ def main():
     print(f"Create buckets: {buckets_count}")
 
     with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
-        buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning, args.location)
+        buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning, args.location, no_verify_ssl)
                         for _, endpoint in
                         zip(range(buckets_count), cycle(endpoints))]
 
```
```diff
@@ -85,7 +87,7 @@ def main():
     total_objects = objects_per_bucket * buckets_count
 
     with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
-        objects_runs = [executor.submit(upload_object, bucket, payload_file.name, endpoint)
+        objects_runs = [executor.submit(upload_object, bucket, payload_file.name, endpoint, no_verify_ssl)
                         for _, bucket, endpoint in
                         zip(range(total_objects), cycle(buckets), cycle(endpoints))]
 
```
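
Both submit loops rely on the same `zip(range(n), cycle(seq))` idiom: `range` caps the number of submitted jobs while `cycle` hands out endpoints (and buckets) round-robin. A quick illustration with hypothetical endpoints:

```python
from itertools import cycle

endpoints = ["https://n1:8080", "https://n2:8080"]  # hypothetical node addresses
jobs = list(zip(range(5), cycle(endpoints)))
# -> [(0, 'https://n1:8080'), (1, 'https://n2:8080'), (2, 'https://n1:8080'),
#     (3, 'https://n2:8080'), (4, 'https://n1:8080')]
print(jobs)
```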
```diff
@@ -21,10 +21,12 @@ const bucket_list = new SharedArray('bucket_list', function () {
 const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
 const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
 
+const no_verify_ssl = __ENV.NO_VERIFY_SSL || "true";
+const connection_args = {no_verify_ssl: no_verify_ssl}
 // Select random S3 endpoint for current VU
 const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
 const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
-const s3_client = s3.connect(`http://${s3_endpoint}`);
+const s3_client = s3.connect(s3_endpoint, connection_args);
 const log = logging.new().withField("endpoint", s3_endpoint);
 
 const registry_enabled = !!__ENV.REGISTRY_FILE;
```
```diff
@@ -24,7 +24,9 @@ const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
 // Select random S3 endpoint for current VU
 const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
 const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
-const s3_client = s3.connect(`http://${s3_endpoint}`);
+const no_verify_ssl = __ENV.NO_VERIFY_SSL || "true";
+const connection_args = {no_verify_ssl: no_verify_ssl}
+const s3_client = s3.connect(s3_endpoint, connection_args);
 const log = logging.new().withField("endpoint", s3_endpoint);
 
 const registry_enabled = !!__ENV.REGISTRY_FILE;
```
```diff
@@ -43,10 +43,12 @@ if (__ENV.GRPC_ENDPOINTS) {
 // Connect to random S3 endpoint
 let s3_client = undefined;
 if (__ENV.S3_ENDPOINTS) {
+    const no_verify_ssl = __ENV.NO_VERIFY_SSL || "true";
+    const connection_args = {no_verify_ssl: no_verify_ssl}
     const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
     const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
     log = log.withField("endpoint", s3_endpoint);
-    s3_client = s3.connect(`http://${s3_endpoint}`);
+    s3_client = s3.connect(s3_endpoint, connection_args);
 }
 
 // We will attempt to verify every object in "created" status. The scenario will execute
```