#!/usr/bin/python3
"""Preset tool: create S3 buckets via gateway endpoints and pre-load objects into them.

Writes a JSON report (--out) with the created bucket names, uploaded object IDs
and the object size, for later consumption by load-test scenarios.
"""

import argparse
import json
import sys
import tempfile
import time
from concurrent.futures import ProcessPoolExecutor
from itertools import cycle

from helpers.aws_cli import create_bucket, upload_object
from helpers.cmd import random_payload

parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb.')
parser.add_argument('--buckets', help='Number of buckets to create.')
parser.add_argument('--out', help='JSON file with output.')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects.')
parser.add_argument('--endpoint', help='S3 Gateways addresses separated by comma.')
parser.add_argument('--update', help='True/False, False by default. Save existed buckets from target file (--out). '
                                     'New buckets will not be created.')
parser.add_argument('--location', help='AWS location. Will be empty, if has not be declared.', default="")
parser.add_argument('--versioning', help='True/False, False by default.')
parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)
parser.add_argument('--sleep', help='Time to sleep between buckets creation and objects upload (in seconds), '
                                    'Default = 8', default=8)

args = parser.parse_args()
print(args)

# Exit codes reported when the preset did not produce what was requested.
ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
# Hard cap on the process pool size regardless of --workers.
MAX_WORKERS = 50


def main():
    """Create buckets (or reuse those listed in --out when --update is set),
    then upload --preload_obj random-payload objects into each bucket.

    Exits with ERROR_WRONG_CONTAINERS_COUNT / ERROR_WRONG_OBJECTS_COUNT on a
    count mismatch unless --ignore-errors is given.
    """
    buckets = []
    objects_list = []

    ignore_errors = args.ignore_errors
    no_verify_ssl = args.no_verify_ssl
    endpoints = args.endpoint.split(',')
    workers = int(args.workers)
    objects_per_bucket = int(args.preload_obj)
    # argparse yields str for CLI-supplied values (the default is int 8);
    # convert once so both the comparison and time.sleep() below are correct.
    sleep_seconds = int(args.sleep)

    # NOTE(review): args.update is a string, so ANY non-empty value — including
    # the literal "False" the help text suggests — enables update mode. Kept
    # as-is to preserve caller-visible behavior; confirm before tightening.
    if args.update:
        # Reuse the buckets recorded in the target file; no new buckets are created.
        with open(args.out) as f:
            data_json = json.load(f)
            buckets = data_json['buckets']
        buckets_count = len(buckets)
    else:
        buckets_count = int(args.buckets)
        print(f"Create buckets: {buckets_count}")

        with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
            # Round-robin bucket creation across the configured endpoints.
            buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning,
                                            args.location, no_verify_ssl)
                            for _, endpoint in zip(range(buckets_count), cycle(endpoints))]

            for run in buckets_runs:
                bucket_name = run.result()
                if bucket_name:
                    buckets.append(bucket_name)

        print("Create buckets: Completed")

    print(f" > Buckets: {buckets}")
    if buckets_count == 0 or len(buckets) != buckets_count:
        print(f"Buckets mismatch in preset: expected {buckets_count}, created {len(buckets)}")
        if not ignore_errors:
            sys.exit(ERROR_WRONG_CONTAINERS_COUNT)

    if sleep_seconds != 0:
        # Pause between bucket creation and uploads (see --sleep help text).
        print(f"Sleep for {sleep_seconds} seconds")
        time.sleep(sleep_seconds)

    print(f"Upload objects to each bucket: {objects_per_bucket} ")
    payload_file = tempfile.NamedTemporaryFile()
    random_payload(payload_file, args.size)
    print(" > Create random payload: Completed")

    total_objects = objects_per_bucket * buckets_count

    with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
        # Pair each upload slot with a bucket and an endpoint, both round-robin.
        objects_runs = [executor.submit(upload_object, bucket, payload_file.name,
                                        endpoint, no_verify_ssl)
                        for _, bucket, endpoint in zip(range(total_objects),
                                                       cycle(buckets), cycle(endpoints))]

        for run in objects_runs:
            result = run.result()
            if result:
                bucket = result[0]
                endpoint = result[1]
                object_id = result[2]
                objects_list.append({'bucket': bucket, 'object': object_id})
                print(f" > Uploaded object {object_id} for bucket {bucket} via endpoint {endpoint}.")

    if total_objects > 0 and len(objects_list) != total_objects:
        print(f"Objects mismatch in preset: expected {total_objects}, created {len(objects_list)}")
        if not ignore_errors:
            sys.exit(ERROR_WRONG_OBJECTS_COUNT)

    data = {'buckets': buckets, 'objects': objects_list, 'obj_size': args.size + " Kb"}
    with open(args.out, 'w+') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)

    print("Result:")
    print(f" > Total Buckets has been created: {len(buckets)}.")
    print(f" > Total Objects has been created: {len(objects_list)}.")


if __name__ == "__main__":
    main()