add scenarios with pregen

Signed-off-by: anatoly@nspcc.ru <anatoly@nspcc.ru>
fyrchik/lorem-ipsum
anatoly@nspcc.ru 2022-06-23 05:20:56 +03:00 committed by Anatoly Bogatyrev
parent 5d77a526d0
commit f85c5d31db
9 changed files with 635 additions and 0 deletions

View File

@ -0,0 +1,7 @@
{
"load-1-1": "REP 1 IN X CBF 1 SELECT 1 FROM * AS X",
"load-1-2": "REP 1 IN X CBF 1 SELECT 2 FROM * AS X",
"load-1-3": "REP 1 IN X CBF 1 SELECT 3 FROM * AS X",
"load-1-4": "REP 1 IN X CBF 1 SELECT 4 FROM * AS X",
"node-off": "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
}

View File

@ -0,0 +1 @@
{"records": [{"operation": "PUT", "action": "ALLOW", "filters": [], "targets": [{"role": "OTHERS", "keys": []}]}, {"operation": "GET", "action": "ALLOW", "filters": [], "targets": [{"role": "OTHERS", "keys": []}]}, {"operation": "DELETE", "action": "ALLOW", "filters": [], "targets": [{"role": "OTHERS", "keys": []}]}, {"operation": "SEARCH", "action": "ALLOW", "filters": [], "targets": [{"role": "OTHERS", "keys": []}]}, {"operation": "GETRANGE", "action": "ALLOW", "filters": [], "targets": [{"role": "OTHERS", "keys": []}]}, {"operation": "GETRANGEHASH", "action": "ALLOW", "filters": [], "targets": [{"role": "OTHERS", "keys": []}]}, {"operation": "HEAD", "action": "ALLOW", "filters": [], "targets": [{"role": "OTHERS", "keys": []}]}]}

View File

@ -0,0 +1 @@
{"version":"3.0","accounts":[{"address":"Nge3U4wJpDGK2BWGfH5VcZ5PAbC6Ro7GHY","key":"6PYTcTrvskzG2txTRSAoyQbSiuVxfa7oYmPWNCphCNLGjLdH1WPZX4M8o7","label":"","contract":{"script":"DCEDtjKkGW5fy6cv4Uy0xkltDaUJ3lkuIuKWSlswEZIAQM1BVuezJw==","parameters":[{"name":"parameter0","type":"Signature"}],"deployed":false},"lock":false,"isDefault":false}],"scrypt":{"n":16384,"r":8,"p":8},"extra":{"Tokens":null}}

93
scenarios/grpc.js 100644
View File

@ -0,0 +1,93 @@
import native from 'k6/x/neofs/native';
import crypto from 'k6/crypto';
import { SharedArray } from 'k6/data';

// Pre-generated fixtures (see scenarios/preset/preset_grpc.py).
// SharedArray shares one parsed copy of the JSON across all VUs.
const obj_list = new SharedArray('obj_list', function () {
    return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
const container_list = new SharedArray('container_list', function () {
    return JSON.parse(open(__ENV.PREGEN_JSON)).containers; });
// Size label of the pre-uploaded objects, logged in setup() for reference.
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
/*
Example:
./k6 run -e PROFILE=0:60 -e WRITE_OBJ_SIZE=1024 -e CLIENTS=200 -e NODES=node4.data:8084 -e PREGEN_JSON=test.json scenarios/grpc.js

Parse profile from env.
Format write:duration
 * write - write operations in percent, relative to read operations
 * duration - duration in seconds
*/
const [ write, duration ] = __ENV.PROFILE.split(':');
// Set VUs between write and read operations
let vus_read = Math.ceil(__ENV.CLIENTS/100*(100-parseInt(write)))
let vus_write = __ENV.CLIENTS - vus_read
// One payload buffer generated up front and reused by every write iteration.
const payload = crypto.randomBytes(1024*parseInt(__ENV.WRITE_OBJ_SIZE))
// NOTE(review): a random node is picked once here (k6 init code) and one gRPC
// connection is kept to it — confirm this runs per-VU as intended.
let nodes = __ENV.NODES.split(',')
let rand_node = nodes[Math.floor(Math.random()*nodes.length)];
const neofs_cli = native.connect(rand_node, "")
// Build the scenario map; a side (write/read) with zero VUs is omitted.
let scenarios = {}
if (vus_write > 0){
    scenarios.write= {
        executor: 'constant-vus',
        vus: vus_write,
        duration: `${duration}s`,
        exec: 'obj_write',
        gracefulStop: '5s',
    }
}
if (vus_read > 0){
    scenarios.read= {
        executor: 'constant-vus',
        vus: vus_read,
        duration: `${duration}s`,
        exec: 'obj_read',
        gracefulStop: '5s',
    }
}
// Log the pre-generated fixture stats once before the load starts.
export function setup() {
    console.log(`Pregenerated containers: ${container_list.length}`);
    console.log(`Pregenerated read object size: ${read_size}`);
    console.log(`Pregenerated total objects: ${obj_list.length}`);
}
// k6 run options: scenarios assembled above; setup() must finish within 5s.
export const options = {
    scenarios: scenarios,
    setupTimeout: '5s',
};
// PUT one payload into a randomly chosen pre-generated container.
export function obj_write() {
    // Unique header makes every uploaded object distinct.
    const headers = { 'unique_header': uuidv4() };
    const idx = Math.floor(Math.random() * container_list.length);
    const resp = neofs_cli.put(container_list[idx], headers, payload);
    if (!resp.success) {
        console.log(resp.error);
    }
}
// GET a random pre-uploaded object by (container, object) pair.
export function obj_read() {
    const pick = obj_list[Math.floor(Math.random() * obj_list.length)];
    const resp = neofs_cli.get(pick.container, pick.object);
    if (!resp.success) {
        console.log(resp.error);
    }
}
// RFC 4122 version-4 UUID built from Math.random (not cryptographically secure).
export function uuidv4() {
    const template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx';
    return template.replace(/[xy]/g, (ch) => {
        const rand = Math.random() * 16 | 0;
        const nibble = ch === 'x' ? rand : (rand & 0x3) | 0x8;
        return nibble.toString(16);
    });
}

92
scenarios/http.js 100644
View File

@ -0,0 +1,92 @@
import http from 'k6/http';
import crypto from 'k6/crypto';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';

// Pre-generated fixtures (see scenarios/preset/preset_grpc.py).
// SharedArray shares one parsed copy of the JSON across all VUs.
const obj_list = new SharedArray('obj_list', function () {
    return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
const container_list = new SharedArray('container_list', function () {
    return JSON.parse(open(__ENV.PREGEN_JSON)).containers; });
// Size label of the pre-uploaded objects, logged in setup() for reference.
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
/*
Parse profile from env.
Format write:duration
 * write - write operations in percent, relative to read operations
 * duration - duration in seconds
*/
const [ write, duration ] = __ENV.PROFILE.split(':');
// Set VUs between write and read operations
let vus_read = Math.ceil(__ENV.CLIENTS/100*(100-parseInt(write)))
let vus_write = __ENV.CLIENTS - vus_read
// One payload buffer generated up front and reused by every write iteration.
const payload = crypto.randomBytes(1024*parseInt(__ENV.WRITE_OBJ_SIZE))
// NOTE(review): a random gateway node is picked once here (k6 init code) —
// confirm this runs per-VU as intended.
let nodes = __ENV.NODES.split(',') // node1.neofs
let rand_node = nodes[Math.floor(Math.random()*nodes.length)];
// Build the scenario map; a side (write/read) with zero VUs is omitted.
let scenarios = {}
if (vus_write > 0){
    scenarios.write= {
        executor: 'constant-vus',
        vus: vus_write,
        duration: `${duration}s`,
        exec: 'obj_write',
        gracefulStop: '5s',
    }
}
if (vus_read > 0){
    scenarios.read= {
        executor: 'constant-vus',
        vus: vus_read,
        duration: `${duration}s`,
        exec: 'obj_read',
        gracefulStop: '5s',
    }
}
// Log the pre-generated fixture stats once before the load starts.
export function setup() {
    console.log(`Pregenerated containers: ${container_list.length}`);
    console.log(`Pregenerated read object size: ${read_size}`);
    console.log(`Pregenerated total objects: ${obj_list.length}`);
}
// k6 run options: scenarios assembled above; setup() must finish within 5s.
export const options = {
    scenarios: scenarios,
    setupTimeout: '5s',
};
// Upload the payload via the HTTP gateway as a multipart form POST.
export function obj_write() {
    const form = {
        field: uuidv4(),
        file: http.file(payload, "random.data"),
    };
    const idx = Math.floor(Math.random() * container_list.length);
    const container = container_list[idx];
    const resp = http.post(`http://${rand_node}/upload/${container}`, form);
    if (resp.status != 200) {
        console.log(`${resp.status}`);
    }
}
// Download a random pre-uploaded object via the HTTP gateway.
export function obj_read() {
    const pick = obj_list[Math.floor(Math.random() * obj_list.length)];
    const resp = http.get(`http://${rand_node}/get/${pick.container}/${pick.object}`);
    if (resp.status != 200) {
        console.log(`${pick.object} - ${resp.status}`);
    }
}
// RFC 4122 version-4 UUID built from Math.random (not cryptographically secure).
export function uuidv4() {
    const template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx';
    return template.replace(/[xy]/g, (ch) => {
        const rand = Math.random() * 16 | 0;
        const nibble = ch === 'x' ? rand : (rand & 0x3) | 0x8;
        return nibble.toString(16);
    });
}

View File

@ -0,0 +1,132 @@
#!/usr/bin/python3
# Pre-generates NeoFS containers and objects for the k6 gRPC scenario
# (scenarios/grpc.js): creates containers, uploads random-payload objects,
# and records the resulting CIDs/OIDs in a JSON file consumed by the test.
from multiprocessing import Process
import uuid
import shlex
from subprocess import check_output, CalledProcessError, STDOUT
import json
import os
import argparse, sys
from concurrent.futures import ProcessPoolExecutor
# NOTE(review): Process, uuid and sys appear unused in this script — confirm.

parser=argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb')
parser.add_argument('--containers', help='Number of containers to create')
parser.add_argument('--out', help='JSON file with output')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects')
parser.add_argument('--endpoint', help='Node address')
parser.add_argument('--update', help='Save existed containers')
args=parser.parse_args()
print(args)
def main():
    """Create (or reuse) containers, upload objects into each, and dump the
    resulting {containers, objects, obj_size} structure to args.out as JSON.
    """
    container_list = []
    objects_struct = []
    payload_filepath='/tmp/data_file'
    if args.update:
        # --update: reuse containers recorded in an earlier output file
        # instead of creating new ones.
        with open(args.out) as f:
            data_json = json.load(f)
            container_list = data_json['containers']
            # Get CID list
    else:
        print(f"Create containers: {args.containers}")
        # Containers are created concurrently; failed creations return None
        # and are skipped.
        with ProcessPoolExecutor(max_workers=10) as executor:
            containers_runs = {executor.submit(create_container): _ for _ in range(int(args.containers))}
            for run in containers_runs:
                if run.result() is not None:
                    container_list.append(run.result())
        print("Create containers: Completed")
    print(f" > Containers: {container_list}")
    print(f"Upload objects to each container: {args.preload_obj} ")
    # One shared random payload file is reused for every upload.
    random_payload(payload_filepath)
    print(" > Create random payload: Completed")
    for container in container_list:
        print(f" > Upload objects for container {container}")
        with ProcessPoolExecutor(max_workers=50) as executor:
            objects_runs = {executor.submit(upload_object, container, payload_filepath): _ for _ in range(int(args.preload_obj))}
            for run in objects_runs:
                if run.result() is not None:
                    objects_struct.append({'container': container, 'object': run.result()})
        print(f" > Upload objects for container {container}: Completed")
    print("Upload objects to each container: Completed")
    data = { 'containers': container_list, 'objects': objects_struct, 'obj_size': args.size + " Kb" }
    with open(args.out, 'w') as f:
        json.dump(data, f, ensure_ascii=False)
    print(f"Result:")
    print(f" > Total Containers has been created: {len(container_list)}.")
    print(f" > Total Objects has been created: {len(objects_struct)}.")
def random_payload(payload_filepath):
    """Write args.size kilobytes of random bytes to payload_filepath."""
    size_bytes = 1024 * int(args.size)
    with open(payload_filepath, 'wb') as fout:
        fout.write(os.urandom(size_bytes))
def execute_cmd(cmd_line):
    """Run a command line and capture its combined stdout/stderr.

    :param cmd_line: full command line as a single string; split with shlex.
    :return: tuple (output: str, success: bool) — success is False when the
             command exits non-zero, with output holding its captured text.
    """
    # Renamed local from `args` to avoid shadowing the module-level argparse
    # result that other functions in this script read.
    cmd_args = shlex.split(cmd_line)
    try:
        output = check_output(cmd_args, stderr=STDOUT).decode()
        success = True
    except CalledProcessError as e:
        output = e.output.decode()
        success = False
    return output, success
def create_container():
    """Create one container via neofs-cli.

    :return: the container ID (CID) parsed from the CLI output, or None when
             the CLI call fails or its output is unparsable.
    :raises ValueError: when the first output line is not "<label>: <CID>".
    """
    cmd_line = f"neofs-cli --rpc-endpoint {args.endpoint} container create -g --policy 'REP 1 IN X CBF 1 SELECT 1 FROM * AS X' --basic-acl public-read-write --await"
    output, success = execute_cmd(cmd_line)
    # Bug fix: the original fell through after a failed call and referenced an
    # undefined variable (and misspelled `output` as `ouptut`); return None so
    # the caller skips this container.
    if not success:
        print(f" > Container has not been created.")
        return None
    try:
        fst_str = output.split('\n')[0]
    except Exception:
        print(f"Got empty output: {output}")
        return None
    splitted = fst_str.split(": ")
    if len(splitted) != 2:
        raise ValueError(f"no CID was parsed from command output: \t{fst_str}")
    return splitted[1]
def upload_object(container, payload_filepath):
    """Upload payload_filepath into the given container via neofs-cli.

    :param container: target container ID.
    :param payload_filepath: path of the file to upload.
    :return: the object ID (OID) parsed from the CLI output, or None when the
             CLI call fails or its output is unparsable.
    :raises ValueError: when the second output line is not "<label>: <OID>".
    """
    cmd_line = f"neofs-cli --rpc-endpoint {args.endpoint} object put -g --file {payload_filepath} --cid {container} --no-progress"
    out, success = execute_cmd(cmd_line)
    # Bug fix: the original fell through after a failed call and referenced an
    # undefined `snd_str`; return None so the caller skips this object.
    if not success:
        print(f" > Object has not been uploaded.")
        return None
    try:
        # OID is reported on the second line of the command output.
        snd_str = out.split('\n')[1]
    except IndexError:
        print(f"Got empty input: {out}")
        return None
    splitted = snd_str.split(": ")
    if len(splitted) != 2:
        raise ValueError(f"no OID was parsed from command output: \t{snd_str}")
    return splitted[1]
# Script entry point: argparse already ran at import time above.
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,146 @@
#!/usr/bin/python3
# Pre-generates S3 buckets and objects for the k6 S3 scenario
# (scenarios/s3.js): creates buckets through the S3 gateway via the AWS CLI,
# uploads random-payload objects, and records bucket/object names in JSON.
from multiprocessing import Process
import uuid
import shlex
from subprocess import check_output, CalledProcessError, STDOUT
import json
import os
import argparse, sys
from concurrent.futures import ProcessPoolExecutor
# NOTE(review): Process and sys appear unused in this script — confirm.

parser=argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb.')
parser.add_argument('--buckets', help='Number of buckets to create.')
parser.add_argument('--out', help='JSON file with output.')
parser.add_argument('--preload_obj', help='Number of pre-loaded objects.')
parser.add_argument('--endpoint', help='S3 Gateway address.')
parser.add_argument('--update', help='True/False, False by default. Save existed buckets from target file (--out). New buckets will not be created.')
parser.add_argument('--location', help='AWS location. Will be empty, if has not be declared.')
parser.add_argument('--versioning', help='True/False, False by default.')
args=parser.parse_args()
print(args)
def main():
    """Create (or reuse) buckets, upload objects into each, and dump the
    resulting {buckets, objects, obj_size} structure to args.out as JSON.
    """
    bucket_list = []
    objects_struct = []
    payload_filepath='/tmp/data_file'
    if args.update:
        # --update: reuse buckets recorded in an earlier output file instead
        # of creating new ones.
        with open(args.out) as f:
            data_json = json.load(f)
            bucket_list = data_json['buckets']
    else:
        print(f"Create buckets: {args.buckets}")
        # Buckets are created concurrently; failed creations return None and
        # are skipped.
        with ProcessPoolExecutor(max_workers=10) as executor:
            buckets_runs = {executor.submit(create_bucket): _ for _ in range(int(args.buckets))}
            for run in buckets_runs:
                if run.result() is not None:
                    bucket_list.append(run.result())
        print("Create buckets: Completed")
    print(f" > Buckets: {bucket_list}")
    print(f"Upload objects to each bucket: {args.preload_obj} ")
    # One shared random payload file is reused for every upload.
    random_payload(payload_filepath)
    print(" > Create random payload: Completed")
    for bucket in bucket_list:
        print(f" > Upload objects for bucket {bucket}")
        with ProcessPoolExecutor(max_workers=50) as executor:
            objects_runs = {executor.submit(upload_object, bucket, payload_filepath): _ for _ in range(int(args.preload_obj))}
            for run in objects_runs:
                if run.result() is not None:
                    objects_struct.append({'bucket': bucket, 'object': run.result()})
        print(f" > Upload objects for bucket {bucket}: Completed")
    print("Upload objects to each bucket: Completed")
    data = { 'buckets': bucket_list, 'objects': objects_struct, 'obj_size': args.size + " Kb" }
    with open(args.out, 'w') as f:
        json.dump(data, f, ensure_ascii=False)
    print(f"Result:")
    print(f" > Total Buckets has been created: {len(bucket_list)}.")
    print(f" > Total Objects has been created: {len(objects_struct)}.")
def random_payload(payload_filepath):
    """Write args.size kilobytes of random bytes to payload_filepath."""
    size_bytes = 1024 * int(args.size)
    with open(payload_filepath, 'wb') as fout:
        fout.write(os.urandom(size_bytes))
def execute_cmd(cmd_line):
    """Run a command line and capture its combined stdout/stderr.

    :param cmd_line: full command line as a single string; split with shlex.
    :return: tuple (output: str, success: bool) — success is False when the
             command exits non-zero, with output holding its captured text.
    """
    # Renamed local from `args` to avoid shadowing the module-level argparse
    # result that other functions in this script read.
    cmd_args = shlex.split(cmd_line)
    try:
        output = check_output(cmd_args, stderr=STDOUT).decode()
        success = True
    except CalledProcessError as e:
        output = e.output.decode()
        success = False
    return output, success
def create_bucket():
    """Create one S3 bucket (UUID name) through the gateway via the AWS CLI.

    Optionally enables versioning when args.versioning == "True".

    :return: the bucket name on success, or None when creation failed — the
             original returned the name unconditionally, so failed buckets
             leaked into the output JSON and broke later uploads.
    """
    location = ""
    if args.location:
        location = f"--create-bucket-configuration 'LocationConstraint={args.location}'"
    bucket_name = str(uuid.uuid4())
    cmd_line = f"aws --no-verify-ssl s3api create-bucket --bucket {bucket_name} --endpoint http://{args.endpoint} {location}"
    cmd_line_ver = f"aws --no-verify-ssl s3api put-bucket-versioning --bucket {bucket_name} --versioning-configuration Status=Enabled --endpoint http://{args.endpoint} "
    out, success = execute_cmd(cmd_line)
    if not success:
        # "already own it" means the bucket exists and is usable; anything
        # else is a genuine failure.
        if "succeeded and you already own it" not in out:
            print(f" > Bucket {bucket_name} has not been created.")
            return None
    else:
        print(f"cmd: {cmd_line}")
    if args.versioning == "True":
        out, success = execute_cmd(cmd_line_ver)
        if not success:
            print(f" > Bucket versioning has not been applied for bucket {bucket_name}.")
        else:
            print(f" > Bucket versioning has been applied.")
    return bucket_name
def upload_object(bucket, payload_filepath):
    """Upload payload_filepath into the given bucket via the AWS CLI.

    :return: the generated object key on success, None on failure.
    """
    object_name = str(uuid.uuid4())
    cmd_line = f"aws s3api put-object --bucket {bucket} --key {object_name} --body {payload_filepath} --endpoint http://{args.endpoint} "
    out, success = execute_cmd(cmd_line)
    if not success:
        print(f" > Object {object_name} has not been uploaded.")
        return None
    return object_name
# Script entry point: argparse already ran at import time above.
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,60 @@
---
# How to execute scenarios
## gRPC
1. Create pre-generated containers or objects:
The tests will use all pre-created containers for PUT operations and all pre-created objects for READ operations.
```shell
./scenarios/preset/preset_grpc.py --size 1024 --containers 1 --out grpc.json --endpoint node4.intra:8080 --preload_obj 500
```
2. Execute scenario with options:
```shell
$ ./k6 run -e PROFILE=50:60 -e WRITE_OBJ_SIZE=8192 -e CLIENTS=400 -e NODES=node1.data:8080,node4.data:8080 -e PREGEN_JSON=./grpc.json scenarios/grpc.js
```
Options:
* PROFILE - format write:duration
* write - write operations in percent, relative to read operations
* duration - time in sec
* CLIENTS - number of VUs for all operations
* WRITE_OBJ_SIZE - object size in kb for write(PUT) operations
* PREGEN_JSON - path to json file with pre-generated containers and objects
## S3
1. Create S3 credentials:
```shell
$ neofs-s3-authmate issue-secret --wallet wallet.json --peer node1.intra:8080 --gate-public-key 03d33a2cc7b8daaa5a3df3fccf065f7cf1fc6a3279efc161fcec512dcc0c1b2277 --gate-public-key 03ff0ad212e10683234442530bfd71d0bb18c3fbd6459aba768eacf158b0c359a2 --gate-public-key 033ae03ff30ed3b6665af69955562cfc0eae18d50e798ab31f054ee22e32fee993 --gate-public-key 02127c7498de0765d2461577c9d4f13f916eefd1884896183e6de0d9a85d17f2fb --bearer-rules rules.json --container-placement-policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
Enter password for wallet.json >
{
"access_key_id": "38xRsCTb2LTeCWNK1x5dPYeWC1X22Lq4ahKkj1NV6tPk0Dack8FteJHQaW4jkGWoQBGQ8R8UW6CdoAr7oiwS7fFQb",
"secret_access_key": "e671e353375030da3fbf521028cb43810280b814f97c35672484e303037ea1ab",
"owner_private_key": "48e83ab313ca45fe73c7489565d55652a822ef659c75eaba2d912449713f8e58",
"container_id": "38xRsCTb2LTeCWNK1x5dPYeWC1X22Lq4ahKkj1NV6tPk"
}
```
Run `aws configure`.
2. Create pre-generated buckets or objects:
The tests will use all pre-created buckets for PUT operations and all pre-created objects for READ operations.
```shell
./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3.json --endpoint node4.intra:8084 --preload_obj 500
```
3. Execute scenario with options:
```shell
$ ./k6 run -e PROFILE=50:60 -e WRITE_OBJ_SIZE=8192 -e CLIENTS=400 -e NODES=node1.data:8084,node4.data:8084 -e PREGEN_JSON=s3.json scenarios/s3.js
```

103
scenarios/s3.js 100644
View File

@ -0,0 +1,103 @@
import s3 from 'k6/x/neofs/s3';
import crypto from 'k6/crypto';
import { SharedArray } from 'k6/data';

// Pre-generated fixtures (see scenarios/preset/preset_s3.py).
// SharedArray shares one parsed copy of the JSON across all VUs.
const obj_list = new SharedArray('obj_list', function () {
    return JSON.parse(open(__ENV.PREGEN_JSON)).objects; });
const bucket_list = new SharedArray('bucket_list', function () {
    return JSON.parse(open(__ENV.PREGEN_JSON)).buckets; });
// Size label of the pre-uploaded objects, logged in setup() for reference.
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
/*
Example:
./k6 run -e PROFILE=0:60 -e WRITE_OBJ_SIZE=1024 -e CLIENTS=200 -e NODES=node4.data:8084 -e PREGEN_JSON=test.json scenarios/s3_t.js

Parse profile from env.
Format write:duration
 * write - write operations in percent, relative to read operations
 * duration - duration in seconds
*/
const [ write, duration ] = __ENV.PROFILE.split(':');
// Set VUs between write and read operations
let vus_read = Math.ceil(__ENV.CLIENTS/100*(100-parseInt(write)))
let vus_write = __ENV.CLIENTS - vus_read
// One payload buffer generated up front and reused by every write iteration.
const payload = crypto.randomBytes(1024*parseInt(__ENV.WRITE_OBJ_SIZE))
// NOTE(review): a random gateway node is picked once here (k6 init code) and
// one S3 client is kept for it — confirm this runs per-VU as intended.
let nodes = __ENV.NODES.split(',')
let rand_node = nodes[Math.floor(Math.random()*nodes.length)];
let s3_cli = s3.connect(`http://${rand_node}`)
// Build the scenario map; a side (write/read) with zero VUs is omitted.
let scenarios = {}
if (vus_write > 0){
    scenarios.write= {
        executor: 'constant-vus',
        vus: vus_write,
        duration: `${duration}s`,
        exec: 'obj_write',
        gracefulStop: '5s',
    }
}
if (vus_read > 0){
    scenarios.read= {
        executor: 'constant-vus',
        vus: vus_read,
        duration: `${duration}s`,
        exec: 'obj_read',
        gracefulStop: '5s',
    }
}
// Log the pre-generated fixture stats once before the load starts.
export function setup() {
    console.log(`Pregenerated buckets: ${bucket_list.length}`);
    console.log(`Pregenerated read object size: ${read_size}`);
    console.log(`Pregenerated total objects: ${obj_list.length}`);
}
// k6 run options: scenarios assembled above; setup() must finish within 5s.
export const options = {
    scenarios: scenarios,
    setupTimeout: '5s',
};
// PUT the payload into a random bucket. The key is either the fixed
// OBJ_NAME env value (overwrite mode) or a fresh UUID per iteration.
export function obj_write() {
    const key = __ENV.OBJ_NAME ? __ENV.OBJ_NAME : uuidv4();
    const idx = Math.floor(Math.random() * bucket_list.length);
    const resp = s3_cli.put(bucket_list[idx], key, payload);
    if (!resp.success) {
        console.log(resp.error);
    }
}
// GET a random pre-uploaded object by (bucket, object) pair.
export function obj_read() {
    const pick = obj_list[Math.floor(Math.random() * obj_list.length)];
    const resp = s3_cli.get(pick.bucket, pick.object);
    if (!resp.success) {
        console.log(resp.error);
    }
}
// RFC 4122 version-4 UUID built from Math.random (not cryptographically secure).
export function uuidv4() {
    const template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx';
    return template.replace(/[xy]/g, (ch) => {
        const rand = Math.random() * 16 | 0;
        const nibble = ch === 'x' ? rand : (rand & 0x3) | 0x8;
        return nibble.toString(16);
    });
}