forked from TrueCloudLab/xk6-frostfs
Vladimir Domnich
962da644af
It improves payload generation in our scenarios. The current implementation generates a single random payload at the start and then sends that same payload on every request. A more realistic test generates a unique payload for each request. However, this is an expensive operation that can easily become a bottleneck on the k6 side when we run multiple writing VUs. So instead we generate a random buffer with some extra bytes and then take slices of this buffer, producing a random payload for each request. Signed-off-by: Vladimir Domnich <v.domnich@yadro.com>
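The buffer-slicing idea, as a minimal sketch in plain JavaScript (the real generator is implemented in Go inside the xk6 extension; TAIL_SIZE and makeGenerator are hypothetical names used here only for illustration):

// Sketch only: illustrates the technique described in the commit message,
// not the extension's actual implementation.
const TAIL_SIZE = 1024; // hypothetical number of extra random bytes

function makeGenerator(size) {
    // Pay the cost of random generation once: size + TAIL_SIZE bytes.
    const buf = new Uint8Array(size + TAIL_SIZE);
    for (let i = 0; i < buf.length; i++) {
        buf[i] = (Math.random() * 256) | 0;
    }
    return {
        // Each request slices the shared buffer at a random offset:
        // O(1) per call, yet the payload differs between requests.
        genPayload: function () {
            const offset = (Math.random() * (TAIL_SIZE + 1)) | 0;
            return buf.subarray(offset, offset + size);
        },
    };
}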
115 lines
3 KiB
JavaScript
import datagen from 'k6/x/neofs/datagen';
import s3 from 'k6/x/neofs/s3';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';

const obj_list = new SharedArray('obj_list', function () {
    return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});

const bucket_list = new SharedArray('bucket_list', function () {
    return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
});

const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;

/*
./k6 run -e PROFILE=0:60 -e WRITE_OBJ_SIZE=1024 -e CLIENTS=200 -e NODES=node4.data:8084 -e PREGEN_JSON=test.json scenarios/s3_t.js

The load profile is parsed from the environment.
Format write:duration:
  * write - write operations in percent, relative to read operations
  * duration - duration in seconds

OBJ_NAME - if declared, this name will be used for all write operations instead of random generation.
*/

const [ write, duration ] = __ENV.PROFILE.split(':');

// Split the VUs between write and read operations.
let vus_read = Math.ceil(__ENV.CLIENTS / 100 * (100 - parseInt(write)));
let vus_write = __ENV.CLIENTS - vus_read;

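// Worked example (illustrative numbers, not from the source): PROFILE=25:600
// with CLIENTS=200 gives write=25 and duration=600, so
// vus_read = Math.ceil(200 / 100 * 75) = 150 and vus_write = 200 - 150 = 50.
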
// Payload generator; WRITE_OBJ_SIZE is given in KiB.
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));

// Each k6 instance picks one random node endpoint for all its requests.
let nodes = __ENV.NODES.split(',');
let rand_node = nodes[Math.floor(Math.random() * nodes.length)];

let s3_cli = s3.connect(`http://${rand_node}`);

let scenarios = {};

if (vus_write > 0) {
    scenarios.write = {
        executor: 'constant-vus',
        vus: vus_write,
        duration: `${duration}s`,
        exec: 'obj_write',
        gracefulStop: '5s',
    };
}

if (vus_read > 0) {
    scenarios.read = {
        executor: 'constant-vus',
        vus: vus_read,
        duration: `${duration}s`,
        exec: 'obj_read',
        gracefulStop: '5s',
    };
}

export function setup() {
    console.log("Pregenerated buckets: " + bucket_list.length);
    console.log("Pregenerated read object size: " + read_size);
    console.log("Pregenerated total objects: " + obj_list.length);
}

export const options = {
    scenarios: scenarios,
    setupTimeout: '5s',
};

export function obj_write() {
    // OBJ_NAME pins a single key, so every write overwrites the same object;
    // otherwise a fresh UUID key is generated for each request.
    let key = "";
    if (__ENV.OBJ_NAME) {
        key = __ENV.OBJ_NAME;
    } else {
        key = uuidv4();
    }

    let bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

    const { payload } = generator.genPayload(false);
    let resp = s3_cli.put(bucket, key, payload);

    if (!resp.success) {
        console.log(resp.error);
    }

    if (__ENV.SLEEP) {
        sleep(__ENV.SLEEP);
    }
}

export function obj_read() {
    let random_read_obj = obj_list[Math.floor(Math.random() * obj_list.length)];

    let resp = s3_cli.get(random_read_obj.bucket, random_read_obj.object);
    if (!resp.success) {
        console.log(resp.error);
    }

    if (__ENV.SLEEP) {
        sleep(__ENV.SLEEP);
    }
}

// Generate an RFC 4122 version 4 UUID from Math.random().
export function uuidv4() {
    return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
        let r = Math.random() * 16 | 0, v = c === 'x' ? r : (r & 0x3 | 0x8);
        return v.toString(16);
    });
}