From bf4a11474e303fe4c340e2bb27355b10af6420b9 Mon Sep 17 00:00:00 2001 From: Vladimir Domnich Date: Wed, 21 Sep 2022 17:22:33 +0400 Subject: [PATCH] [#19] Add counters to verify scenario Signed-off-by: Vladimir Domnich --- internal/registry/obj_registry.go | 26 +++++++++++ scenarios/run_scenarios.md | 4 +- scenarios/verify.js | 76 ++++++++++++++++++++----------- 3 files changed, 79 insertions(+), 27 deletions(-) diff --git a/internal/registry/obj_registry.go b/internal/registry/obj_registry.go index 1007dfc..fb44bed 100644 --- a/internal/registry/obj_registry.go +++ b/internal/registry/obj_registry.go @@ -104,6 +104,32 @@ func (o *ObjRegistry) SetObjectStatus(id uint64, newStatus string) error { }) } +func (o *ObjRegistry) GetObjectCountInStatus(status string) (int, error) { + var objCount = 0 + err := o.boltDB.View(func(tx *bbolt.Tx) error { + b := tx.Bucket([]byte(bucketName)) + if b == nil { + return nil + } + + c := b.Cursor() + for keyBytes, objBytes := c.First(); keyBytes != nil; keyBytes, objBytes = c.Next() { + if objBytes != nil { + var obj ObjectInfo + if err := json.Unmarshal(objBytes, &obj); err != nil { + // Ignore malformed objects + continue + } + if obj.Status == status { + objCount++ + } + } + } + return nil + }) + return objCount, err +} + func (o *ObjRegistry) NextObjectToVerify() (*ObjectInfo, error) { return o.objSelector.NextObject() } diff --git a/scenarios/run_scenarios.md b/scenarios/run_scenarios.md index c0bc537..b961769 100644 --- a/scenarios/run_scenarios.md +++ b/scenarios/run_scenarios.md @@ -26,6 +26,7 @@ Options: * WRITE_OBJ_SIZE - object size in kb for write(PUT) operations * PREGEN_JSON - path to json file with pre-generated containers and objects * REGISTRY_FILE - if set, all produced objects will be stored in database for subsequent verification. Database file name will be set to value of REGISTRY_FILE. + * SLEEP - time interval (in seconds) between VU iterations. 
## S3 @@ -73,9 +74,10 @@ To verify stored objects execute scenario with options: ``` Scenario picks up all objects in `created` status. If object is stored correctly, its' status will be changed into `verified`. If object does not exist or its' data is corrupted, then the status will be changed into `invalid`. -Scenario ends as soon as all objects are checked (return code will be [108](https://k6.io/docs/javascript-api/k6-execution/#test)). +Scenario ends as soon as all objects are checked or time limit is exceeded. Options: * CLIENTS - number of VUs for verifying objects (VU can handle both GRPC and S3 objects) * TIME_LIMIT - amount of time in seconds that is sufficient to verify all objects. If this time interval ends, then verification process will be interrupted and objects that have not been checked will stay in the `created` state. * REGISTRY_FILE - database file from which objects for verification should be read. + * SLEEP - time interval (in seconds) between VU iterations. \ No newline at end of file diff --git a/scenarios/verify.js b/scenarios/verify.js index 2dee6ee..9e8422a 100644 --- a/scenarios/verify.js +++ b/scenarios/verify.js @@ -2,18 +2,24 @@ import native from 'k6/x/neofs/native'; import registry from 'k6/x/neofs/registry'; import s3 from 'k6/x/neofs/s3'; import { sleep } from 'k6'; -import exec from 'k6/execution'; +import { Counter } from 'k6/metrics'; /* ./k6 run -e CLIENTS=200 -e TIME_LIMIT=30 -e GRPC_ENDPOINTS=node4.data:8084 -e REGISTRY_FILE=registry.bolt scenarios/verify.js */ - const obj_registry = registry.open(__ENV.REGISTRY_FILE); // Time limit (in seconds) for the run const time_limit = __ENV.TIME_LIMIT || "60"; +// Count of objects in each status +const obj_counters = { + verified: new Counter('verified_obj'), + skipped: new Counter('skipped_obj'), + invalid: new Counter('invalid_obj'), +}; + // Connect to random gRPC endpoint let grpc_client = undefined; if (__ENV.GRPC_ENDPOINTS) { @@ -30,11 +36,15 @@ if (__ENV.S3_ENDPOINTS) { 
s3_client = s3.connect(`http://${s3_endpoint}`); } +// We will attempt to verify every object in "created" status. The scenario will execute +// as many iterations as there are objects. Each object will have 3 retries to be verified +const obj_count_to_verify = obj_registry.getObjectCountInStatus("created"); const scenarios = { verify: { - executor: 'constant-vus', + executor: 'shared-iterations', vus: __ENV.CLIENTS, - duration: `${time_limit}s`, + iterations: obj_count_to_verify, + maxDuration: `${time_limit}s`, exec: 'obj_verify', gracefulStop: '5s', } @@ -45,6 +55,13 @@ export const options = { setupTimeout: '5s', }; +export function setup() { + // Populate counters with initial values + for (const [status, counter] of Object.entries(obj_counters)) { + counter.add(obj_registry.getObjectCountInStatus(status)); + } +} + export function obj_verify() { if (__ENV.SLEEP) { sleep(__ENV.SLEEP); } const obj = obj_registry.nextObjectToVerify(); if (!obj) { - // TODO: consider using a metric with abort condition to stop execution when - // all VUs have no objects to verify.
Alternative solution could be a - // shared-iterations executor, but it might be not a good choice, as we need to - // check same object several times (if specific request fails) - - // Allow time for other VUs to complete verification - sleep(30.0); - exec.test.abort("All objects have been verified"); + console.log("All objects have been verified"); + return; } console.log(`Verifying object ${obj.id}`); - let result = undefined; - if (obj.c_id && obj.o_id) { - result = grpc_client.verifyHash(obj.c_id, obj.o_id, obj.payload_hash); - } else if (obj.s3_bucket && obj.s3_key) { - result = s3_client.verifyHash(obj.s3_bucket, obj.s3_key, obj.payload_hash); - } else { - console.log(`Object id=${obj.id} cannot be verified with supported protocols`); - obj_registry.setObjectStatus(obj.id, "skipped"); + const obj_status = verify_object_with_retries(obj, 3); + obj_counters[obj_status].add(1); + obj_registry.setObjectStatus(obj.id, obj_status); +} + +function verify_object_with_retries(obj, attempts) { + for (let i = 0; i < attempts; i++) { + let result; + if (obj.c_id && obj.o_id) { + result = grpc_client.verifyHash(obj.c_id, obj.o_id, obj.payload_hash); + } else if (obj.s3_bucket && obj.s3_key) { + result = s3_client.verifyHash(obj.s3_bucket, obj.s3_key, obj.payload_hash); + } else { + console.log(`Object id=${obj.id} cannot be verified with supported protocols`); + return "skipped"; + } + + if (result.success) { + return "verified"; + } else if (result.error === "hash mismatch") { + return "invalid"; + } + + // Unless we explicitly saw that there was a hash mismatch, then we will retry after a delay + console.log(`Verify error on ${obj.id}: ${result.error}. Object will be re-tried`); + if (__ENV.SLEEP) { sleep(__ENV.SLEEP); } } - if (result.success) { - obj_registry.setObjectStatus(obj.id, "verified"); - } else { - obj_registry.setObjectStatus(obj.id, "invalid"); - console.log(`Verify error on ${obj.c_id}/${obj.o_id}: {resp.error}`); - } + return "invalid"; }