xk6-frostfs/scenarios/preset/preset_s3.py
[#100] preset_s3: Add a flag for percent of versioned buckets
Add flag "--buckets_versioned". Default is 0 (no versioned buckets)

Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-11-06 16:47:56 +03:00

#!/usr/bin/python3
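# Preset script for the xk6-frostfs S3 scenarios: creates S3 buckets through the
# given S3 gateway endpoints (optionally enabling versioning on a share of them),
# pre-loads each bucket with random-payload objects, and writes the resulting
# bucket/object list to a JSON file (--out) that the load scenarios can consume.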

import argparse
from itertools import cycle
import json
import sys
import tempfile
import time
from concurrent.futures import ProcessPoolExecutor

from helpers.cmd import random_payload
from helpers.aws_cli import create_bucket, upload_object

ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
ERROR_WRONG_PERCENTAGE = 3

MAX_WORKERS = 50

DEFAULT_LOCATION = ""

parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Size of uploaded objects in kb.')
parser.add_argument('--buckets', help='Number of buckets to create.')
parser.add_argument('--out', help='JSON file with output.')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects.')
parser.add_argument('--endpoint', help='S3 gateway addresses separated by comma.')
parser.add_argument('--update', help='True/False, False by default. Save existing buckets from the target file (--out). '
                                     'New buckets will not be created.')
parser.add_argument('--location', help=f'AWS location constraint. Default is "{DEFAULT_LOCATION}"', action="append")
parser.add_argument('--versioning', help='True/False, False by default. Alias of --buckets_versioned=100')
parser.add_argument('--buckets_versioned', help='Percent of versioned buckets. Default is 0', default=0)
parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
parser.add_argument('--sleep', help='Time to sleep between bucket creation and object upload (in seconds), '
                                    'Default = 8', default=8)
parser.add_argument('--acl', help='Bucket ACL. Default is private. Expected values are: private, public-read or public-read-write.', default="private")

args = parser.parse_args()
print(args)
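
# Example invocation (the values and endpoints below are illustrative, not taken from the repo docs):
#   ./preset_s3.py --size 1024 --buckets 10 --preload_obj 100 \
#       --endpoint host1:8084,host2:8084 --buckets_versioned 25 --out s3_preset.json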


def main():
    buckets = []
    objects_list = []
    ignore_errors = args.ignore_errors
    no_verify_ssl = args.no_verify_ssl

    endpoints = args.endpoint.split(',')
    if not args.location:
        args.location = [DEFAULT_LOCATION]

    workers = int(args.workers)
    objects_per_bucket = int(args.preload_obj)
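
    # In update mode, reuse the buckets recorded in the --out file; otherwise create new ones below.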
    if args.update:
        # Open file
        with open(args.out) as f:
            data_json = json.load(f)
            buckets = data_json['buckets']
            buckets_count = len(buckets)
            # Get CID list
    else:
        buckets_count = int(args.buckets)
        print(f"Create buckets: {buckets_count}")
        with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
            if not 0 <= int(args.buckets_versioned) <= 100:
                print(f"Percent of versioned buckets must be between 0 and 100: got {args.buckets_versioned}")
                if not ignore_errors:
                    sys.exit(ERROR_WRONG_PERCENTAGE)

            if args.versioning == "True":
                versioning_per_bucket = [True] * buckets_count
            else:
                num_versioned_buckets = int((int(args.buckets_versioned) / 100) * buckets_count)
                versioning_per_bucket = [True] * num_versioned_buckets + [False] * (buckets_count - num_versioned_buckets)

            buckets_runs = [executor.submit(create_bucket, endpoint, versioning_per_bucket[i], location, args.acl, no_verify_ssl)
                            for i, endpoint, location in
                            zip(range(buckets_count), cycle(endpoints), cycle(args.location))]

            for run in buckets_runs:
                bucket_name = run.result()
                if bucket_name:
                    buckets.append(bucket_name)
print("Create buckets: Completed")
print(f" > Buckets: {buckets}")
if buckets_count > 0 and len(buckets) != buckets_count:
print(f"Buckets mismatch in preset: expected {buckets_count}, created {len(buckets)}")
if not ignore_errors:
sys.exit(ERROR_WRONG_CONTAINERS_COUNT)

    if int(args.sleep) != 0:
        print(f"Sleep for {args.sleep} seconds")
        time.sleep(int(args.sleep))
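
    # Create one random payload of --size kb and upload it --preload_obj times to every bucket.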
print(f"Upload objects to each bucket: {objects_per_bucket} ")
payload_file = tempfile.NamedTemporaryFile()
random_payload(payload_file, args.size)
print(" > Create random payload: Completed")
total_objects = objects_per_bucket * buckets_count
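
    # Upload the payload in parallel, distributing objects round-robin across buckets and endpoints.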
    with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
        objects_runs = [executor.submit(upload_object, bucket, payload_file.name, endpoint, no_verify_ssl)
                        for _, bucket, endpoint in
                        zip(range(total_objects), cycle(buckets), cycle(endpoints))]

        for run in objects_runs:
            result = run.result()
            if result:
                bucket = result[0]
                endpoint = result[1]
                object_id = result[2]
                objects_list.append({'bucket': bucket, 'object': object_id})
                print(f" > Uploaded object {object_id} for bucket {bucket} via endpoint {endpoint}.")

    if total_objects > 0 and len(objects_list) != total_objects:
        print(f"Objects mismatch in preset: expected {total_objects}, created {len(objects_list)}")
        if not ignore_errors:
            sys.exit(ERROR_WRONG_OBJECTS_COUNT)
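
    # Persist the created buckets and objects so the load scenarios and later --update runs can reuse them.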
    data = {'buckets': buckets, 'objects': objects_list, 'obj_size': args.size + " Kb"}

    with open(args.out, 'w+') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
print("Result:")
print(f" > Total Buckets has been created: {len(buckets)}.")
print(f" > Total Objects has been created: {len(objects_list)}.")


if __name__ == "__main__":
    main()