diff --git a/internal/native/client.go b/internal/native/client.go index f606957..37ee470 100644 --- a/internal/native/client.go +++ b/internal/native/client.go @@ -48,6 +48,11 @@ type ( Error string } + DeleteResponse struct { + Success bool + Error string + } + GetResponse struct { Success bool Error string @@ -129,6 +134,35 @@ func (c *Client) Put(containerID string, headers map[string]string, payload goja return PutResponse{Success: true, ObjectID: id.String()} } +func (c *Client) Delete(containerID string, objectID string) DeleteResponse { + cliContainerID := parseContainerID(containerID) + cliObjectID := parseObjectID(objectID) + + var addr address.Address + addr.SetContainerID(cliContainerID) + addr.SetObjectID(cliObjectID) + + tok := c.tok + tok.ForVerb(session.VerbObjectDelete) + tok.ApplyTo(addr) + err := tok.Sign(c.key) + if err != nil { + panic(err) + } + + var prm client.PrmObjectDelete + prm.ByID(cliObjectID) + prm.FromContainer(cliContainerID) + prm.WithinSession(tok) + + _, err = c.cli.ObjectDelete(c.vu.Context(), prm) + if err != nil { + return DeleteResponse{Success: false, Error: err.Error()} + } + + return DeleteResponse{Success: true} +} + func (c *Client) Get(containerID, objectID string) GetResponse { cliContainerID := parseContainerID(containerID) cliObjectID := parseObjectID(objectID) diff --git a/internal/registry/obj_registry.go b/internal/registry/obj_registry.go index 58543db..bb5e24c 100644 --- a/internal/registry/obj_registry.go +++ b/internal/registry/obj_registry.go @@ -106,6 +106,17 @@ func (o *ObjRegistry) SetObjectStatus(id uint64, newStatus string) error { }) } +func (o *ObjRegistry) DeleteObject(id uint64) error { + return o.boltDB.Update(func(tx *bbolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte(bucketName)) + if err != nil { + return err + } + + return b.Delete(encodeId(id)) + }) +} + func (o *ObjRegistry) Close() error { return o.boltDB.Close() } diff --git a/internal/registry/obj_selector.go 
b/internal/registry/obj_selector.go index f41a634..c0d1556 100644 --- a/internal/registry/obj_selector.go +++ b/internal/registry/obj_selector.go @@ -18,6 +18,10 @@ type ObjSelector struct { filter *ObjFilter mu sync.Mutex lastId uint64 + // UTC date&time before which selector is locked for iteration or resetting. + // This lock prevents concurrency issues when some VUs are selecting objects + // while another VU resets the selector and attempts to select the same objects + lockedUntil time.Time } // NewObjSelector creates a new instance of object selector that can iterate over @@ -45,6 +49,10 @@ func (o *ObjSelector) NextObject() (*ObjectInfo, error) { o.mu.Lock() defer o.mu.Unlock() + if time.Now().UTC().Before(o.lockedUntil) { + return nil, nil + } + // Establish the start position for searching the next object: // If we should go from the beginning (lastId=0), then we start from the first element // Otherwise we start from the key right after the lastId @@ -84,11 +92,19 @@ func (o *ObjSelector) NextObject() (*ObjectInfo, error) { } // Resets object selector to start scanning objects from the beginning. -func (o *ObjSelector) Reset() { +// After resetting the selector is locked for specified lockTime to prevent +// concurrency issues. +func (o *ObjSelector) Reset(lockTime int) bool { o.mu.Lock() defer o.mu.Unlock() + if time.Now().UTC().Before(o.lockedUntil) { + return false + } + o.lastId = 0 + o.lockedUntil = time.Now().UTC().Add(time.Duration(lockTime) * time.Second) + return true } // Count returns total number of objects that match filter of the selector. 
diff --git a/internal/s3/client.go b/internal/s3/client.go index 747a9ed..b82cc35 100644 --- a/internal/s3/client.go +++ b/internal/s3/client.go @@ -28,6 +28,11 @@ type ( Error string } + DeleteResponse struct { + Success bool + Error string + } + GetResponse struct { Success bool Error string @@ -66,6 +71,18 @@ func (c *Client) Put(bucket, key string, payload goja.ArrayBuffer) PutResponse { return PutResponse{Success: true} } +func (c *Client) Delete(bucket, key string) DeleteResponse { + _, err := c.cli.DeleteObject(c.vu.Context(), &s3.DeleteObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + return DeleteResponse{Success: false, Error: err.Error()} + } + + return DeleteResponse{Success: true} +} + func (c *Client) Get(bucket, key string) GetResponse { stats.Report(c.vu, objGetTotal, 1) start := time.Now() diff --git a/scenarios/grpc.js b/scenarios/grpc.js index 7ae5c52..548b1c6 100644 --- a/scenarios/grpc.js +++ b/scenarios/grpc.js @@ -14,24 +14,6 @@ const container_list = new SharedArray('container_list', function () { const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; -/* - ./k6 run -e PROFILE=0:60 -e CLIENTS=200 -e WRITE_OBJ_SIZE=1024 \ - -e GRPC_ENDPOINTS=host1:8080,host2:8080 \ - -e PREGEN_JSON=test.json \ - scenarios/grpc.js - - REGISTRY_FILE - if set, all produced objects will be stored in database for subsequent verification. 
-*/ - -// Parse profile from env (format is write:duration) -// * write - percent of VUs performing write operations (the rest will be read VUs) -// * duration - duration in seconds -const [ write, duration ] = __ENV.PROFILE.split(':'); - -// Allocate VUs between write and read operations -const read_vu_count = Math.ceil(__ENV.CLIENTS / 100 * (100 - parseInt(write))); -const write_vu_count = __ENV.CLIENTS - read_vu_count; - // Select random gRPC endpoint for current VU const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(','); const grpc_endpoint = grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)]; @@ -40,10 +22,27 @@ const grpc_client = native.connect(grpc_endpoint, ''); const registry_enabled = !!__ENV.REGISTRY_FILE; const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined; +const duration = __ENV.DURATION; + +const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined; +let obj_to_delete_selector = undefined; +if (registry_enabled && delete_age) { + obj_to_delete_selector = registry.getSelector( + __ENV.REGISTRY_FILE, + "obj_to_delete", + { + status: "created", + age: delete_age, + } + ); +} + + const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE)); const scenarios = {}; +const write_vu_count = parseInt(__ENV.WRITERS || '0'); if (write_vu_count > 0) { scenarios.write = { executor: 'constant-vus', @@ -51,9 +50,10 @@ if (write_vu_count > 0) { duration: `${duration}s`, exec: 'obj_write', gracefulStop: '5s', - } + }; } +const read_vu_count = parseInt(__ENV.READERS || '0'); if (read_vu_count > 0) { scenarios.read = { executor: 'constant-vus', @@ -61,13 +61,35 @@ if (read_vu_count > 0) { duration: `${duration}s`, exec: 'obj_read', gracefulStop: '5s', - } + }; } +const delete_vu_count = parseInt(__ENV.DELETERS || '0'); +if (delete_vu_count > 0) { + scenarios.delete = { + executor: 'constant-vus', + vus: delete_vu_count, + duration: `${duration}s`, + exec: 'obj_delete', + gracefulStop: 
'5s', + }; +} + +export const options = { + scenarios, + setupTimeout: '5s', +}; + export function setup() { - console.log("Pregenerated containers: " + container_list.length); - console.log("Pregenerated read object size: " + read_size); - console.log("Pregenerated total objects: " + obj_list.length); + const total_vu_count = write_vu_count + read_vu_count + delete_vu_count; + + console.log(`Pregenerated containers: ${container_list.length}`); + console.log(`Pregenerated read object size: ${read_size}`); + console.log(`Pregenerated total objects: ${obj_list.length}`); + console.log(`Reading VUs: ${read_vu_count}`); + console.log(`Writing VUs: ${write_vu_count}`); + console.log(`Deleting VUs: ${delete_vu_count}`); + console.log(`Total VUs: ${total_vu_count}`); } export function teardown(data) { @@ -76,11 +98,6 @@ export function teardown(data) { } } -export const options = { - scenarios, - setupTimeout: '5s', -}; - export function obj_write() { if (__ENV.SLEEP) { sleep(__ENV.SLEEP); @@ -115,6 +132,30 @@ export function obj_read() { } } +export function obj_delete() { + if (__ENV.SLEEP) { + sleep(__ENV.SLEEP); + } + + const obj = obj_to_delete_selector.nextObject(); + if (!obj) { + // If there are no objects to delete, we reset selector to start scanning from the + // beginning of registry. Then we wait for some time until suitable object might appear + obj_to_delete_selector.reset(delete_age); + sleep(delete_age / 2); + return; + } + + const resp = grpc_client.delete(obj.c_id, obj.o_id); + if (!resp.success) { + // Log errors except (2052 - object already deleted) + console.log(resp.error); + return; + } + + obj_registry.deleteObject(obj.id); +} + export function uuidv4() { return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) { let r = Math.random() * 16 | 0, v = c === 'x' ? 
r : (r & 0x3 | 0x8); diff --git a/scenarios/http.js b/scenarios/http.js index 8ef07fe..863b779 100644 --- a/scenarios/http.js +++ b/scenarios/http.js @@ -1,93 +1,110 @@ import datagen from 'k6/x/neofs/datagen'; +import registry from 'k6/x/neofs/registry'; import http from 'k6/http'; import { SharedArray } from 'k6/data'; import { sleep } from 'k6'; const obj_list = new SharedArray('obj_list', function () { - return JSON.parse(open(__ENV.PREGEN_JSON)).objects; }); + return JSON.parse(open(__ENV.PREGEN_JSON)).objects; +}); const container_list = new SharedArray('container_list', function () { - return JSON.parse(open(__ENV.PREGEN_JSON)).containers; }); + return JSON.parse(open(__ENV.PREGEN_JSON)).containers; +}); const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; -/* - Parse profile from env. - Format write:obj_size: - * write - write operations in percent, relative to read operations - * duration - duration in seconds -*/ +// Select random HTTP endpoint for current VU +const http_endpoints = __ENV.HTTP_ENDPOINTS.split(','); +const http_endpoint = http_endpoints[Math.floor(Math.random() * http_endpoints.length)]; -const [ write, duration ] = __ENV.PROFILE.split(':'); +const registry_enabled = !!__ENV.REGISTRY_FILE; +const obj_registry = registry_enabled ? 
registry.open(__ENV.REGISTRY_FILE) : undefined; -// Set VUs between write and read operations -let vus_read = Math.ceil(__ENV.CLIENTS/100*(100-parseInt(write))) -let vus_write = __ENV.CLIENTS - vus_read +const duration = __ENV.DURATION; const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE)); -let nodes = __ENV.NODES.split(',') // node1.neofs -let rand_node = nodes[Math.floor(Math.random()*nodes.length)]; +const scenarios = {}; -let scenarios = {} - -if (vus_write > 0){ - scenarios.write= { +const write_vu_count = parseInt(__ENV.WRITERS || '0'); +if (write_vu_count > 0) { + scenarios.write = { executor: 'constant-vus', - vus: vus_write, + vus: write_vu_count, duration: `${duration}s`, exec: 'obj_write', gracefulStop: '5s', } } -if (vus_read > 0){ - scenarios.read= { +const read_vu_count = parseInt(__ENV.READERS || '0'); +if (read_vu_count > 0) { + scenarios.read = { executor: 'constant-vus', - vus: vus_read, + vus: read_vu_count, duration: `${duration}s`, exec: 'obj_read', gracefulStop: '5s', } } -export function setup() { - console.log("Pregenerated containers: " + container_list.length) - console.log("Pregenerated read object size: " + read_size) - console.log("Pregenerated total objects: " + obj_list.length) -} - export const options = { - scenarios: scenarios, + scenarios, setupTimeout: '5s', }; +export function setup() { + const total_vu_count = write_vu_count + read_vu_count; + + console.log(`Pregenerated containers: ${container_list.length}`); + console.log(`Pregenerated read object size: ${read_size}`); + console.log(`Pregenerated total objects: ${obj_list.length}`); + console.log(`Reading VUs: ${read_vu_count}`); + console.log(`Writing VUs: ${write_vu_count}`); + console.log(`Total VUs: ${total_vu_count}`); +} + +export function teardown(data) { + if (obj_registry) { + obj_registry.close(); + } +} + export function obj_write() { - const { payload } = generator.genPayload(false); - let data = { + if (__ENV.SLEEP) { + 
sleep(__ENV.SLEEP); + } + + const container = container_list[Math.floor(Math.random() * container_list.length)]; + + const { payload, hash } = generator.genPayload(registry_enabled); + const data = { field: uuidv4(), file: http.file(payload, "random.data"), }; - let container = container_list[Math.floor(Math.random()*container_list.length)]; - let resp = http.post(`http://${rand_node}/upload/${container}`, data); + const resp = http.post(`http://${http_endpoint}/upload/${container}`, data); if (resp.status != 200) { - console.log(`${resp.status}`); + console.log(`ERROR: ${resp.status} ${resp.error}`); + return; } - if (__ENV.SLEEP) { - sleep(__ENV.SLEEP); + const object_id = JSON.parse(resp.body).object_id; + if (obj_registry) { + obj_registry.addObject(container, object_id, "", "", hash); } } export function obj_read() { - let random_read_obj = obj_list[Math.floor(Math.random()*obj_list.length)]; - let resp = http.get(`http://${rand_node}/get/${random_read_obj.container}/${random_read_obj.object}`); - if (resp.status != 200) { - console.log(`${random_read_obj.object} - ${resp.status}`); - } if (__ENV.SLEEP) { sleep(__ENV.SLEEP); } + + const obj = obj_list[Math.floor(Math.random() * obj_list.length)]; + const resp = http.get(`http://${http_endpoint}/get/${obj.container}/${obj.object}`); + if (resp.status != 200) { + console.log(`ERROR reading ${obj.object}: ${resp.status}`); + } } export function uuidv4() { diff --git a/scenarios/run_scenarios.md b/scenarios/run_scenarios.md index b961769..5a082ee 100644 --- a/scenarios/run_scenarios.md +++ b/scenarios/run_scenarios.md @@ -2,6 +2,19 @@ # How to execute scenarios +## Common options for gRPC, HTTP, S3 scenarios: + +Scenarios `grpc.js`, `http.js` and `s3.js` support the following options: + * `DURATION` - duration of scenario in seconds. + * `READERS` - number of VUs performing read operations. + * `WRITERS` - number of VUs performing write operations. 
+ * `REGISTRY_FILE` - if set, all produced objects will be stored in database for subsequent verification. Database file name will be set to the value of `REGISTRY_FILE`. + * `WRITE_OBJ_SIZE` - object size in kb for write(PUT) operations. + * `PREGEN_JSON` - path to json file with pre-generated containers and objects (in case of http scenario we use json pre-generated for grpc scenario). + * `SLEEP` - time interval (in seconds) between VU iterations. + +Examples of how to use these options are provided below for each scenario. + ## gRPC 1. Create pre-generated containers or objects: @@ -9,31 +22,44 @@ The tests will use all pre-created containers for PUT operations and all pre-created objects for READ operations. ```shell -./scenarios/preset/preset_grpc.py --size 1024 --containers 1 --out grpc.json --endpoint node4.intra:8080 --preload_obj 500 +$ ./scenarios/preset/preset_grpc.py --size 1024 --containers 1 --out grpc.json --endpoint host1:8080 --preload_obj 500 ``` 2. Execute scenario with options: ```shell -$ ./k6 run -e PROFILE=50:60 -e WRITE_OBJ_SIZE=8192 -e CLIENTS=400 -e GRPC_ENDPOINTS=node1.data:8080,node4.data:8080 -e PREGEN_JSON=./grpc.json scenarios/grpc.js +$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e DELETERS=30 -e DELETE_AGE=10 -e REGISTRY_FILE=registry.bolt -e GRPC_ENDPOINTS=host1:8080,host2:8080 -e PREGEN_JSON=./grpc.json scenarios/grpc.js ``` -Options: - * PROFILE - format write:duration - * write - percent of VUs performing write operations (the rest will be read VUs) - * duration - time in seconds - * CLIENTS - number of VUs for all operations - * WRITE_OBJ_SIZE - object size in kb for write(PUT) operations - * PREGEN_JSON - path to json file with pre-generated containers and objects - * REGISTRY_FILE - if set, all produced objects will be stored in database for subsequent verification. Database file name will be set to value of REGISTRY_FILE. - * SLEEP - time interval (in seconds) between VU iterations. 
+Options (in addition to the common options): + * `GRPC_ENDPOINTS` - GRPC endpoints of neoFS in format `host:port`. To specify multiple endpoints separate them by comma. + * `DELETERS` - number of VUs performing delete operations (using deleters requires that options `DELETE_AGE` and `REGISTRY_FILE` are specified as well). + * `DELETE_AGE` - age of object in seconds before which it can not be deleted. This parameter can be used to control how many objects we have in the system under load. + +## HTTP + +1. Create pre-generated containers or objects: + +There is no dedicated script to preset HTTP scenario, so we use the same script as for gRPC: +```shell +$ ./scenarios/preset/preset_grpc.py --size 1024 --containers 1 --out grpc.json --endpoint host1:8080 --preload_obj 500 +``` + +2. Execute scenario with options: + +```shell +$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=10 -e WRITERS=20 -e REGISTRY_FILE=registry.bolt -e HTTP_ENDPOINTS=host1:8888,host2:8888 -e PREGEN_JSON=./grpc.json scenarios/http.js +``` + +Options (in addition to the common options): + * `HTTP_ENDPOINTS` - endpoints of HTTP gateways in format `host:port`. To specify multiple endpoints separate them by comma. ## S3 1. 
Create s3 credentials: ```shell -$ neofs-s3-authmate issue-secret --wallet wallet.json --peer node1.intra:8080 --gate-public-key 03d33a2cc7b8daaa5a3df3fccf065f7cf1fc6a3279efc161fcec512dcc0c1b2277 --gate-public-key 03ff0ad212e10683234442530bfd71d0bb18c3fbd6459aba768eacf158b0c359a2 --gate-public-key 033ae03ff30ed3b6665af69955562cfc0eae18d50e798ab31f054ee22e32fee993 --gate-public-key 02127c7498de0765d2461577c9d4f13f916eefd1884896183e6de0d9a85d17f2fb --bearer-rules rules.json --container-placement-policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" +$ neofs-s3-authmate issue-secret --wallet wallet.json --peer host1:8080 --gate-public-key 03d33a2cc7b8daaa5a3df3fccf065f7cf1fc6a3279efc161fcec512dcc0c1b2277 --gate-public-key 03ff0ad212e10683234442530bfd71d0bb18c3fbd6459aba768eacf158b0c359a2 --gate-public-key 033ae03ff30ed3b6665af69955562cfc0eae18d50e798ab31f054ee22e32fee993 --gate-public-key 02127c7498de0765d2461577c9d4f13f916eefd1884896183e6de0d9a85d17f2fb --bearer-rules rules.json --container-placement-policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" Enter password for wallet.json > { @@ -51,33 +77,38 @@ Run `aws configure`. The tests will use all pre-created buckets for PUT operations and all pre-created objects for READ operations. ```shell -./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3.json --endpoint node4.intra:8084 --preload_obj 500 +$ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3.json --endpoint host1:8084 --preload_obj 500 ``` 3. 
Execute scenario with options: ```shell -$ ./k6 run -e PROFILE=50:60 -e WRITE_OBJ_SIZE=8192 -e CLIENTS=400 -e S3_ENDPOINTS=node1.data:8084,node4.data:8084 -e PREGEN_JSON=s3.json scenarios/s3.js +$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e DELETERS=30 -e DELETE_AGE=10 -e REGISTRY_FILE=registry.bolt -e S3_ENDPOINTS=host1:8084,host2:8084 -e PREGEN_JSON=s3.json scenarios/s3.js ``` -Options are identical to gRPC scenario, plus: - * OBJ_NAME - if specified, this name will be used for all write operations instead of random generation. +Options (in addition to the common options): + * `S3_ENDPOINTS` - endpoints of S3 gateways in format `host:port`. To specify multiple endpoints separate them by comma. + * `DELETERS` - number of VUs performing delete operations (using deleters requires that options `DELETE_AGE` and `REGISTRY_FILE` are specified as well). + * `DELETE_AGE` - age of object in seconds before which it can not be deleted. This parameter can be used to control how many objects we have in the system under load. + * `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation. ## Verify -This scenario allows to verify that objects created by a previous run are really stored in the system and their data is not corrupted. Running this scenario assumes that you've already run gRPC and/or S3 scenario with option `REGISTRY_FILE`. +This scenario allows to verify that objects created by a previous run are really stored in the system and their data is not corrupted. Running this scenario assumes that you've already run gRPC or HTTP or S3 scenario with option `REGISTRY_FILE`. 
To verify stored objects execute scenario with options: ``` -./k6 run -e CLIENTS=200 -e TIME_LIMIT=120 -e GRPC_ENDPOINTS=node1.data:8080,node2.data:8080 -e S3_ENDPOINTS=node1.data:8084,node2.data:8084 -e REGISTRY_FILE=registry.bolt scenarios/verify.js +./k6 run -e CLIENTS=200 -e TIME_LIMIT=120 -e GRPC_ENDPOINTS=host1:8080,host2:8080 -e S3_ENDPOINTS=host1:8084,host2:8084 -e REGISTRY_FILE=registry.bolt scenarios/verify.js ``` Scenario picks up all objects in `created` status. If object is stored correctly, its' status will be changed into `verified`. If object does not exist or its' data is corrupted, then the status will be changed into `invalid`. Scenario ends as soon as all objects are checked or time limit is exceeded. +Objects produced by HTTP scenario will be verified via gRPC endpoints. + Options: - * CLIENTS - number of VUs for verifying objects (VU can handle both GRPC and S3 objects) - * TIME_LIMIT - amount of time in seconds that is sufficient to verify all objects. If this time interval ends, then verification process will be interrupted and objects that have not been checked will stay in the `created` state. - * REGISTRY_FILE - database file from which objects for verification should be read. - * SLEEP - time interval (in seconds) between VU iterations. \ No newline at end of file + * `CLIENTS` - number of VUs for verifying objects (VU can handle both GRPC and S3 objects) + * `TIME_LIMIT` - amount of time in seconds that is sufficient to verify all objects. If this time interval ends, then verification process will be interrupted and objects that have not been checked will stay in the `created` state. + * `REGISTRY_FILE` - database file from which objects for verification should be read. + * `SLEEP` - time interval (in seconds) between VU iterations. 
diff --git a/scenarios/s3.js b/scenarios/s3.js index b626209..e28b536 100644 --- a/scenarios/s3.js +++ b/scenarios/s3.js @@ -14,22 +14,6 @@ const bucket_list = new SharedArray('bucket_list', function () { const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size; -/* - ./k6 run -e PROFILE=0:60 -e CLIENTS=200 -e WRITE_OBJ_SIZE=1024 \ - -e S3_ENDPOINTS=host1:8084,host2:8084 -e PREGEN_JSON=test.json \ - scenarios/s3.js - - OBJ_NAME - if specified, this name will be used for all write operations instead of random generation. - REGISTRY_FILE - if set, all produced objects will be stored in database for subsequent verification. -*/ - -// Parse profile from env -const [ write, duration ] = __ENV.PROFILE.split(':'); - -// Allocate VUs between write and read operations -let read_vu_count = Math.ceil(__ENV.CLIENTS / 100 * (100 - parseInt(write))); -let write_vu_count = __ENV.CLIENTS - read_vu_count; - // Select random S3 endpoint for current VU const s3_endpoints = __ENV.S3_ENDPOINTS.split(','); const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)]; @@ -38,11 +22,27 @@ const s3_client = s3.connect(`http://${s3_endpoint}`); const registry_enabled = !!__ENV.REGISTRY_FILE; const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined; +const duration = __ENV.DURATION; + +const delete_age = __ENV.DELETE_AGE ? 
parseInt(__ENV.DELETE_AGE) : undefined; +let obj_to_delete_selector = undefined; +if (registry_enabled && delete_age) { + obj_to_delete_selector = registry.getSelector( + __ENV.REGISTRY_FILE, + "obj_to_delete", + { + status: "created", + age: delete_age, + } + ); +} + const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE)); const scenarios = {}; -if (write_vu_count > 0){ +const write_vu_count = parseInt(__ENV.WRITERS || '0'); +if (write_vu_count > 0) { scenarios.write = { executor: 'constant-vus', vus: write_vu_count, @@ -52,7 +52,8 @@ if (write_vu_count > 0){ }; } -if (read_vu_count > 0){ +const read_vu_count = parseInt(__ENV.READERS || '0'); +if (read_vu_count > 0) { scenarios.read = { executor: 'constant-vus', vus: read_vu_count, @@ -62,10 +63,32 @@ if (read_vu_count > 0){ }; } +const delete_vu_count = parseInt(__ENV.DELETERS || '0'); +if (delete_vu_count > 0) { + scenarios.delete = { + executor: 'constant-vus', + vus: delete_vu_count, + duration: `${duration}s`, + exec: 'obj_delete', + gracefulStop: '5s', + }; +} + +export const options = { + scenarios, + setupTimeout: '5s', +}; + export function setup() { - console.log("Pregenerated buckets: " + bucket_list.length); - console.log("Pregenerated read object size: " + read_size); - console.log("Pregenerated total objects: " + obj_list.length); + const total_vu_count = write_vu_count + read_vu_count + delete_vu_count; + + console.log(`Pregenerated buckets: ${bucket_list.length}`); + console.log(`Pregenerated read object size: ${read_size}`); + console.log(`Pregenerated total objects: ${obj_list.length}`); + console.log(`Reading VUs: ${read_vu_count}`); + console.log(`Writing VUs: ${write_vu_count}`); + console.log(`Deleting VUs: ${delete_vu_count}`); + console.log(`Total VUs: ${total_vu_count}`); } export function teardown(data) { @@ -74,11 +97,6 @@ export function teardown(data) { } } -export const options = { - scenarios, - setupTimeout: '5s', -}; - export function obj_write() { if 
(__ENV.SLEEP) { sleep(__ENV.SLEEP); @@ -112,6 +130,29 @@ export function obj_read() { } } +export function obj_delete() { + if (__ENV.SLEEP) { + sleep(__ENV.SLEEP); + } + + const obj = obj_to_delete_selector.nextObject(); + if (!obj) { + // If there are no objects to delete, we reset selector to start scanning from the + // beginning of registry. Then we wait for some time until suitable object might appear + obj_to_delete_selector.reset(delete_age); + sleep(delete_age / 2); + return; + } + + const resp = s3_client.delete(obj.s3_bucket, obj.s3_key); + if (!resp.success) { + console.log(`Error deleting object ${obj.id}: ${resp.error}`); + return; + } + + obj_registry.deleteObject(obj.id); +} + export function uuidv4() { return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) { let r = Math.random() * 16 | 0, v = c === 'x' ? r : (r & 0x3 | 0x8); diff --git a/scenarios/verify.js b/scenarios/verify.js index 75ce0c0..25ebf84 100644 --- a/scenarios/verify.js +++ b/scenarios/verify.js @@ -4,16 +4,16 @@ import s3 from 'k6/x/neofs/s3'; import { sleep } from 'k6'; import { Counter } from 'k6/metrics'; -/* - ./k6 run -e CLIENTS=200 -e TIME_LIMIT=30 -e GRPC_ENDPOINTS=node4.data:8084 - -e REGISTRY_FILE=registry.bolt scenarios/verify.js -*/ const obj_registry = registry.open(__ENV.REGISTRY_FILE); // Time limit (in seconds) for the run const time_limit = __ENV.TIME_LIMIT || "60"; -// Count of objects in each status +// Number of objects in each status. These counters are cumulative in a +// sense that they reflect total number of objects in the registry, not just +// number of objects that were processed by specific run of this scenario. +// This allows to run this scenario multiple times and collect overall +// statistics in the final run. 
const obj_counters = { verified: new Counter('verified_obj'), skipped: new Counter('skipped_obj'), @@ -85,7 +85,6 @@ export function obj_verify() { console.log("All objects have been verified"); return; } - console.log(`Verifying object ${obj.id}`); const obj_status = verify_object_with_retries(obj, 3); obj_counters[obj_status].add(1);