forked from TrueCloudLab/xk6-frostfs

Compare commits — add-lint-i ... master, 18 commits:
8f9c02253c, 829777bd53, 3ebb3dda0a, 3c6023ca29, a326fbcbf8, 14f26e47dc, 296971e57c, 76fd5c9706, f0cbf9c301, 124397578d, a7079cda60, d3d5a1baed, 72d24b04a3, f5df03c718, 1c7a3b3b6c, e0cbc3b763, 54f99dac1d, 591f8af161

27 changed files with 254 additions and 56 deletions
@@ -1 +0,0 @@
* @TrueCloudLab/storage-core @TrueCloudLab/storage-services

CODEOWNERS (new file)
@@ -0,0 +1,3 @@
.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers @TrueCloudLab/storage-services-committers @TrueCloudLab/storage-services-developers
.forgejo/.* @potyarkin
Makefile @potyarkin
@@ -27,7 +27,8 @@ Start by forking the `xk6-frostfs` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:

### Set up your GitHub Repository
### Set up your repository

Fork [xk6-frostfs upstream](https://git.frostfs.info/TrueCloudLab/xk6-frostfs/fork) source
repository to your own personal repository. Copy the URL of your fork (you will
need it for the `git clone` command below).

@@ -89,7 +90,7 @@ $ git push origin feature/123-something_awesome
```

### Create a Pull Request
Pull requests can be created via GitHub. Refer to [this
Pull requests can be created via git.frostfs.info. Refer to [this
document](https://help.github.com/articles/creating-a-pull-request/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.
README.md
@@ -57,7 +57,7 @@ const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0

### Methods
- `putContainer(params)`. The `params` is a dictionary (e.g.
`{acl:'public-read-write',placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
`{placement_policy:'REP 3',name:'container-name',name_global_scope:'false'}`).
Returns dictionary with `success`
boolean flag, `container_id` string, and `error` string.
- `setBufferSize(size)`. Sets internal buffer size for data upload and
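For reference, a minimal sketch of calling `putContainer` with the updated parameter set (the `acl` key is gone from the documented params). Endpoint, key and parameter values are placeholders taken from the examples elsewhere in this diff, not a definitive usage:

```js
import native from 'k6/x/frostfs/native';
import { fail } from 'k6';

// Placeholder endpoint and empty hex key, as in the README connect example.
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0);

export function setup() {
  // `acl` is no longer part of the params; the presets in this diff attach APE rules instead.
  const res = frostfs_cli.putContainer({
    placement_policy: 'REP 3',
    name: 'container-name',
    name_global_scope: 'false',
  });
  if (!res.success) {
    fail(res.error);
  }
  return { container_id: res.container_id };
}
```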
@@ -107,11 +107,12 @@ const s3_cli = s3.connect("https://s3.frostfs.devenv:8080")
You can also provide additional options:
```js
import s3 from 'k6/x/frostfs/s3';
const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s'})
const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'timeout': '60s', 'aws_profile': 'metal'})
```

* `no_verify_ssl` - Bool. If `true` - skip verifying the s3 certificate chain and host name (useful if s3 uses self-signed certificates)
* `timeout` - Duration. Set timeout for requests (in http client). If omitted or zero - timeout is infinite.
* `aws_profile` - String. Use custom profile credentials from `$HOME/.aws/credentials` file. If omitted or empty - use default profile.

### Methods
- `createBucket(bucket, params)`. Returns dictionary with `success` boolean flag
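The Go changes later in this diff also make `createBucket` understand a `versioning` key in `params` (parsed with `strconv.ParseBool`) and enable bucket versioning when it is true. A hedged sketch of how that could look from a script, reusing the bucket name and parameters from the s3.js example further down in this diff; the `versioning` key comes from the Go code, not from documented README text:

```js
import s3 from 'k6/x/frostfs/s3';
import { fail } from 'k6';

// Options as documented above; 'metal' is simply the profile name used in the README example.
const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true', 'aws_profile': 'metal'});

export function setup() {
  const res = s3_cli.createBucket("cats", {
    acl: 'private',
    lock_enabled: 'true',
    location_constraint: 'ru',
    versioning: 'true', // boolean-ish string, accepted by strconv.ParseBool in the new Go code
  });
  if (!res.success) {
    fail(res.error);
  }
}
```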
@@ -186,6 +187,25 @@ Flags:
  -v, --version version for registry-exporter
```

## Import pregen into registry db

You can import pregenerated json files into registry bolt db. Use `frostfs-xk6-registry import`. Usage examples are in help:

```shell
$ ./bin/frostfs-xk6-registry import -h
Import objects into registry from pregenerated files

Usage:
  xk6-registry import [flags]

Examples:
  xk6-registry import registry.bolt preset.json
  xk6-registry import registry.bolt preset.json another_preset.json

Flags:
  -h, --help help for import
```

# License

- [GNU General Public License v3.0](LICENSE)
@@ -11,7 +11,7 @@ var Cmd = &cobra.Command{
Short: "Import objects into registry",
Long: "Import objects into registry from pregenerated files",
Example: `xk6-registry import registry.bolt preset.json
xk6-registry import --status created registry.bolt preset.json another_preset.json`,
xk6-registry import registry.bolt preset.json another_preset.json`,
RunE: runCmd,
Args: cobra.MinimumNArgs(2),
}
@@ -1,7 +1,8 @@
import local from 'k6/x/frostfs/local';
import datagen from 'k6/x/frostfs/datagen';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

const payload = open('../go.sum', 'b');
const generator = datagen.generator(1024, "random", false);
const local_cli = local.connect("/path/to/config.yaml", "/path/to/config/dir", "", false)

export const options = {

@@ -15,6 +16,7 @@ export default function () {
'unique_header': uuidv4()
}
const container_id = '6BVPPXQewRJ6J5EYmAPLczXxNocS7ikyF7amS2esWQnb';
const payload = generator.genPayload()
let resp = local_cli.put(container_id, headers, payload)
if (resp.success) {
local_cli.get(container_id, resp.object_id)
@@ -1,8 +1,9 @@
import native from 'k6/x/frostfs/native';
import datagen from 'k6/x/frostfs/datagen';
import { fail } from "k6";
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

const payload = open('../go.sum', 'b');
const generator = datagen.generator(1024, "random", false);
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0, false, 0)

export const options = {

@@ -13,7 +14,6 @@ export const options = {

export function setup() {
const params = {
acl: 'public-read-write',
placement_policy: 'REP 3',
name: 'container-name',
name_global_scope: 'false'

@@ -28,6 +28,7 @@ export function setup() {
}

export default function (data) {
const payload = generator.genPayload()
let headers = {
'unique_header': uuidv4()
}
@@ -1,7 +1,9 @@
import native from 'k6/x/frostfs/native';
import datagen from 'k6/x/frostfs/datagen';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

const payload = open('../go.sum', 'b');
const generator = datagen.generator(1024, "random", false);
const payload = generator.genPayload()
const container = "AjSxSNNXbJUDPqqKYm1VbFVDGCakbpUNH8aGjPmGAH3B"
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0, false, 0)
const frostfs_obj = frostfs_cli.onsite(container, payload)
@@ -1,8 +1,9 @@
import s3 from 'k6/x/frostfs/s3';
import datagen from 'k6/x/frostfs/datagen';
import { fail } from 'k6'
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

const payload = open('../go.sum', 'b');
const generator = datagen.generator(1024, "random", false);
const bucket = "cats"
const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true'})

@@ -16,7 +17,6 @@ export function setup() {
const params = {
acl: 'private',
lock_enabled: 'true',
location_constraint: 'ru'
}

const res = s3_cli.createBucket(bucket, params)

@@ -27,6 +27,7 @@ export function setup() {

export default function () {
const key = uuidv4();
const payload = generator.genPayload()
if (s3_cli.put(bucket, key, payload).success) {
s3_cli.get(bucket, key)
}
@@ -1,14 +1,16 @@
import s3local from 'k6/x/frostfs/s3local';
import datagen from 'k6/x/frostfs/datagen';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

const bucket = "testbucket"
const payload = open('../go.sum', 'b');
const generator = datagen.generator(1024, "random", false);
const s3local_cli = s3local.connect("path/to/storage/config.yml", "path/to/storage/config/dir", {}, {
'testbucket': 'GBQDDUM1hdodXmiRHV57EUkFWJzuntsG8BG15wFSwam6',
});

export default function () {
const key = uuidv4();
const payload = generator.genPayload()
if (s3local_cli.put(bucket, key, payload).success) {
s3local_cli.get(bucket, key)
}
@@ -1,6 +1,10 @@
package logging

import (
"fmt"
"strings"
"time"

"github.com/dop251/goja"
"github.com/sirupsen/logrus"
"go.k6.io/k6/js/modules"

@@ -55,14 +59,29 @@ func (r *RootModule) NewModuleInstance(vu modules.VU) modules.Instance {
return &Logging{vu: vu}
}

tsFormat, disableTs := time.TimeOnly, false
if val, ok := vu.InitEnv().LookupEnv("DATE_FORMAT"); ok {
switch strings.ToLower(val) {
case "timeonly":
case "datetime":
tsFormat = time.DateTime
case "none":
disableTs = true
default:
panic(fmt.Sprintf("invalid value for DATE_FORMAT: %s (should be `timeonly`, `datetime` or `none`)", val))
}
}

format := lg.Formatter
switch f := format.(type) {
case *logrus.TextFormatter:
f.ForceColors = true
f.FullTimestamp = true
f.TimestampFormat = "15:04:05"
f.TimestampFormat = tsFormat
f.DisableTimestamp = disableTs
case *logrus.JSONFormatter:
f.TimestampFormat = "15:04:05"
f.TimestampFormat = tsFormat
f.DisableTimestamp = disableTs
}

return &Logging{vu: vu}
@@ -13,7 +13,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"

@@ -265,16 +264,6 @@ func (c *Client) PutContainer(params map[string]string) PutContainerResponse {
container.SetCreationTime(&cnr, time.Now())
cnr.SetOwner(usr)

if basicACLStr, ok := params["acl"]; ok {
var basicACL acl.Basic
err := basicACL.DecodeString(basicACLStr)
if err != nil {
return c.putCnrErrorResponse(err)
}

cnr.SetBasicACL(basicACL)
}

placementPolicyStr, ok := params["placement_policy"]
if ok {
var placementPolicy netmap.PlacementPolicy
@@ -118,11 +118,16 @@ func (n *Native) Connect(endpoint, hexPrivateKey string, dialTimeout, streamTime
tok.SetAuthKey(&key)
tok.SetExp(exp)

res, err := cli.NetworkInfo(n.vu.Context(), client.PrmNetworkInfo{})
if err != nil {
return nil, err
}

prevEpoch := res.Info().CurrentEpoch() - 1
tok.SetNbf(prevEpoch)
tok.SetIat(prevEpoch)

if prepareLocally && maxObjSize > 0 {
res, err := cli.NetworkInfo(n.vu.Context(), client.PrmNetworkInfo{})
if err != nil {
return nil, err
}
if uint64(maxObjSize) > res.Info().MaxObjectSize() {
return nil, fmt.Errorf("max object size must be not greater than %d bytes", res.Info().MaxObjectSize())
}
@@ -142,6 +142,70 @@ func (c *Client) Get(bucket, key string) GetResponse {
return GetResponse{Success: true}
}

// DeleteObjectVersion deletes object version with specified versionID.
// If version argument is empty, deletes all versions and delete-markers of specified object.
func (c *Client) DeleteObjectVersion(bucket, key, version string) DeleteResponse {
var toDelete []types.ObjectIdentifier

if version != "" {
toDelete = append(toDelete, types.ObjectIdentifier{
Key: aws.String(key),
VersionId: aws.String(version),
})
} else {
versions, err := c.cli.ListObjectVersions(c.vu.Context(), &s3.ListObjectVersionsInput{
Bucket: aws.String(bucket),
Prefix: aws.String(key),
})
if err != nil {
stats.Report(c.vu, objDeleteFails, 1)
return DeleteResponse{Success: false, Error: err.Error()}
}
toDelete = filterObjectVersions(versions, key)
}
if len(toDelete) == 0 {
return c.Delete(bucket, key)
} else {
_, err := c.cli.DeleteObjects(c.vu.Context(), &s3.DeleteObjectsInput{
Bucket: aws.String(bucket),
Delete: &types.Delete{
Objects: toDelete,
Quiet: true,
},
})
if err != nil {
stats.Report(c.vu, objDeleteFails, 1)
return DeleteResponse{Success: false, Error: err.Error()}
}
}

return DeleteResponse{Success: true}
}

func filterObjectVersions(versions *s3.ListObjectVersionsOutput, key string) []types.ObjectIdentifier {
var result []types.ObjectIdentifier

for _, v := range versions.Versions {
if *v.Key == key {
result = append(result, types.ObjectIdentifier{
Key: v.Key,
VersionId: v.VersionId,
})
}
}

for _, marker := range versions.DeleteMarkers {
if *marker.Key == key {
result = append(result, types.ObjectIdentifier{
Key: marker.Key,
VersionId: marker.VersionId,
})
}
}

return result
}

func get(
c *s3.Client,
bucket string,
@@ -215,6 +279,26 @@ func (c *Client) CreateBucket(bucket string, params map[string]string) CreateBuc
return CreateBucketResponse{Success: false, Error: err.Error()}
}

var versioning bool
if strVersioned, ok := params["versioning"]; ok {
if versioning, err = strconv.ParseBool(strVersioned); err != nil {
stats.Report(c.vu, createBucketFails, 1)
return CreateBucketResponse{Success: false, Error: err.Error()}
}
}
if versioning {
_, err = c.cli.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
Bucket: aws.String(bucket),
VersioningConfiguration: &types.VersioningConfiguration{
Status: types.BucketVersioningStatusEnabled,
},
})
if err != nil {
stats.Report(c.vu, createBucketFails, 1)
return CreateBucketResponse{Success: false, Error: err.Error()}
}
}

stats.Report(c.vu, createBucketSuccess, 1)
stats.Report(c.vu, createBucketDuration, metrics.D(time.Since(start)))
return CreateBucketResponse{Success: true}
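The hunks above add `DeleteObjectVersion` and bucket-versioning support to the S3 client. As a rough orientation only, a k6-side sketch of driving the new delete: the JS method name `deleteObjectVersion` is an assumption based on how the other exported methods (`createBucket`, `put`, `get`) appear in scripts, and the key is a placeholder:

```js
import s3 from 'k6/x/frostfs/s3';

const s3_cli = s3.connect("https://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true'});

export default function () {
  // An explicit version id removes just that version; an empty string
  // removes every version and delete-marker of the key (see the Go code above).
  const res = s3_cli.deleteObjectVersion("cats", "some-key", "");
  if (!res.success) {
    console.error(res.error);
  }
}
```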
@@ -59,7 +59,9 @@ func (s *S3) Connect(endpoint string, params map[string]string) (*Client, error)
}, nil
})

cfg, err := config.LoadDefaultConfig(s.vu.Context(), config.WithEndpointResolverWithOptions(resolver))
cfg, err := config.LoadDefaultConfig(s.vu.Context(),
config.WithEndpointResolverWithOptions(resolver),
config.WithSharedConfigProfile(params["aws_profile"]))
if err != nil {
return nil, fmt.Errorf("configuration error: %w", err)
}
@@ -1,2 +1,2 @@
(()=>{"use strict";var t={n:r=>{var e=r&&r.__esModule?()=>r.default:()=>r;return t.d(e,{a:e}),e},d:(r,e)=>{for(var n in e)t.o(e,n)&&!t.o(r,n)&&Object.defineProperty(r,n,{enumerable:!0,get:e[n]})},o:(t,r)=>Object.prototype.hasOwnProperty.call(t,r),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},r={};t.r(r),t.d(r,{findBetween:()=>x,getCurrentStageIndex:()=>i,normalDistributionStages:()=>m,parseDuration:()=>o,randomIntBetween:()=>d,randomItem:()=>h,randomString:()=>p,tagWithCurrentStageIndex:()=>u,tagWithCurrentStageProfile:()=>s,uuidv4:()=>g});const e=require("k6/execution");var n=t.n(e);function o(t){if(null==t||t.length<1)throw new Error("str is empty");for(var r=0,e="",n={},o=0;o<t.length;o++)if((a(t[o])||"."==t[o])&&(e+=t[o]),null!=t[o+1]&&!a(t[o+1])&&"."!=t[o+1]){var i=parseFloat(e,10),u=t[o+1];switch(u){case"d":r+=24*i*60*60*1e3;break;case"h":r+=60*i*60*1e3;break;case"m":o+2<t.length&&"s"==t[o+2]?(r+=Math.trunc(i),o++,u="ms"):r+=60*i*1e3;break;case"s":r+=1e3*i;break;default:throw new Error("".concat(u," is an unsupported time unit"))}if(n[u])throw new Error("".concat(u," time unit is provided multiple times"));n[u]=!0,o++,e=""}return e.length>0&&(r+=parseFloat(e,10)),r}function a(t){return t>="0"&&t<="9"}function i(){if(null==n()||null==n().test||null==n().test.options)throw new Error("k6/execution.test.options is undefined - getCurrentStageIndex requires a k6 v0.38.0 or later. Please, upgrade for getting k6/execution.test.options supported.");var t=n().test.options.scenarios[n().scenario.name];if(null==t)throw new Error("the exec.test.options object doesn't contain the current scenario ".concat(n().scenario.name));if(null==t.stages)throw new Error("only ramping-vus or ramping-arravial-rate supports stages, it is not possible to get a stage index on other executors.");if(t.stages.length<1)throw new Error("the current scenario ".concat(t.name," doesn't contain any stage"));for(var r=0,e=new Date-n().scenario.startTime,a=0;a<t.stages.length;a++)if(e<(r+=o(t.stages[a].duration)))return a;return t.stages.length-1}function u(){n().vu.tags.stage=i()}function s(){n().vu.tags.stage_profile=function(){var t=i();if(t<1)return"ramp-up";var r=n().test.options.scenarios[n().scenario.name].stages,e=r[t],o=r[t-1];return e.target>o.target?"ramp-up":o.target==e.target?"steady":"ramp-down"}()}const l=require("k6/crypto");function c(t){return function(t){if(Array.isArray(t))return f(t)}(t)||function(t){if("undefined"!=typeof Symbol&&null!=t[Symbol.iterator]||null!=t["@@iterator"])return Array.from(t)}(t)||function(t,r){if(!t)return;if("string"==typeof t)return f(t,r);var e=Object.prototype.toString.call(t).slice(8,-1);"Object"===e&&t.constructor&&(e=t.constructor.name);if("Map"===e||"Set"===e)return Array.from(t);if("Arguments"===e||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(e))return f(t,r)}(t)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function f(t,r){(null==r||r>t.length)&&(r=t.length);for(var e=0,n=new Array(r);e<r;e++)n[e]=t[e];return n}function g(){var t=arguments.length>0&&void 0!==arguments[0]&&arguments[0];return t?y():v()}function d(t,r){return Math.floor(Math.random()*(r-t+1)+t)}function h(t){return t[Math.floor(Math.random()*t.length)]}function p(t){for(var r=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:"abcdefghijklmnopqrstuvwxyz",e="";t--;)e+=r[Math.random()*r.length|0];return e}function x(t,r,e){for(var n,o=arguments.length>3&&void 0!==arguments[3]&&arguments[3],a=[],i=!0,u=0;i&&-1!=(n=t.indexOf(r))&&(n+=r.length,-1!=(u=t.indexOf(e,n)));){var s=t.substring(n,u);if(!o)return s;a.push(s),t=t.substring(u+e.length)}return a.length?a:null}function m(t,r){var e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:10;function n(t,r,e){return Math.exp(-.5*Math.pow((e-t)/r,2))/(r*Math.sqrt(2*Math.PI))}for(var o=0,a=1,i=new Array(e+2).fill(0),u=new Array(e+2).fill(Math.ceil(r/6)),s=[],l=0;l<=e;l++)i[l]=n(o,a,-2*a+4*a*l/e);for(var f=Math.max.apply(Math,c(i)),g=i.map((function(r){return Math.round(r*t/f)})),d=1;d<=e;d++)u[d]=Math.ceil(4*r/(6*e));for(var h=0;h<=e+1;h++)s.push({duration:"".concat(u[h],"s"),target:g[h]});return s}function v(){return"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g,(function(t){var r=16*Math.random()|0;return("x"===t?r:3&r|8).toString(16)}))}function y(){for(var t=[],r=0;r<256;++r)t.push((r+256).toString(16).slice(1));var e=new Uint8Array((0,l.randomBytes)(16));return e[6]=15&e[6]|64,e[8]=63&e[8]|128,(t[e[0]]+t[e[1]]+t[e[2]]+t[e[3]]+"-"+t[e[4]]+t[e[5]]+"-"+t[e[6]]+t[e[7]]+"-"+t[e[8]]+t[e[9]]+"-"+t[e[10]]+t[e[11]]+t[e[12]]+t[e[13]]+t[e[14]]+t[e[15]]).toLowerCase()}var w=exports;for(var b in r)w[b]=r[b];r.__esModule&&Object.defineProperty(w,"__esModule",{value:!0})})();
//# sourceMappingURL=index.js.map
scenarios/libs/keygen.js (new file)
@@ -0,0 +1,34 @@
import { uuidv4 } from './k6-utils-1.4.0.js';

export function generateS3Key() {
    let width = parseInt(__ENV.DIR_WIDTH || '0');
    let height = parseInt(__ENV.DIR_HEIGHT || '0');

    let key = ''
    if (width > 0 && height > 0) {
        for (let index = 0; index < height; index++) {
            const w = Math.floor(Math.random() * width) + 1;
            key = key + 'dir' + w + '/';
        }
    }

    key += objName();
    return key;
}

const asciiLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"

function objName() {
    if (__ENV.OBJ_NAME) {
        return __ENV.OBJ_NAME;
    }
    const length = parseInt(__ENV.OBJ_NAME_LENGTH || '0');
    if (length > 0) {
        let name = "";
        for (let i = 0; i < length; i++) {
            name += asciiLetters.charAt(Math.floor(Math.random() * asciiLetters.length));
        }
        return name;
    }
    return uuidv4();
}
@@ -15,7 +15,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
cmd_line = f"aws {no_verify_ssl_str} s3api create-bucket --bucket {bucket_name} " \
f"--endpoint {endpoint} {configuration} {acl} "
cmd_line_ver = f"aws {no_verify_ssl_str} s3api put-bucket-versioning --bucket {bucket_name} " \
f"--versioning-configuration Status=Enabled --endpoint {endpoint} {acl} "
f"--versioning-configuration Status=Enabled --endpoint {endpoint}"

output, success = execute_cmd(cmd_line)

@@ -25,7 +25,7 @@ def create_bucket(endpoint, versioning, location, acl, no_verify_ssl):
f"Error: {output}", endpoint)
return False

if versioning == "True":
if versioning:
output, success = execute_cmd(cmd_line_ver)
if not success:
log(f"{cmd_line_ver}\n"
@@ -1,7 +1,7 @@
import re
from helpers.cmd import execute_cmd, log

def create_container(endpoint, policy, container_creation_retry, wallet_path, config, acl, local=False, retry=0):
def create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local=False, retry=0):
if retry > int(container_creation_retry):
raise ValueError(f"unable to create container: too many unsuccessful attempts")

@@ -9,10 +9,8 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, co
wallet_file = f"--wallet {wallet_path}"
if config:
wallet_config = f"--config {config}"
if acl:
acl_param = f"--basic-acl {acl}"
cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} container create {wallet_file} {wallet_config} " \
f" --policy '{policy}' {acl_param} --await"
f" --policy '{policy}' --await"

output, success = execute_cmd(cmd_line)

@@ -36,6 +34,20 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, co

log(f"Created container: {cid} ({policy})", endpoint)

# Add rule for container
if rules:
r = ""
for rule in rules:
r += f" --rule '{rule}' "
cmd_line = f"frostfs-cli --rpc-endpoint {endpoint} ape-manager add {wallet_file} {wallet_config} " \
f" --chain-id 'chain-id' {r} --target-name '{cid}' --target-type 'container'"
output, success = execute_cmd(cmd_line)
if not success:
log(f"{cmd_line}\n"
f"Rule has not been added\n"
f"{output}", endpoint)
return False

if not local:
return cid

@@ -88,7 +100,7 @@ def create_container(endpoint, policy, container_creation_retry, wallet_path, co
return cid

log(f"Created container {cid} is not stored on {endpoint}, creating another one...", endpoint)
return create_container(endpoint, policy, container_creation_retry, wallet_path, config, acl, local, retry + 1)
return create_container(endpoint, policy, container_creation_retry, wallet_path, config, rules, local, retry + 1)


def upload_object(container, payload_filepath, endpoint, wallet_file, wallet_config):
@@ -16,6 +16,7 @@ ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
MAX_WORKERS = 50
DEFAULT_POLICY = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
DEFAULT_RULES = ["allow Object.* *"]

parser = argparse.ArgumentParser()
parser.add_argument('--size', help='Upload objects size in kb')

@@ -37,7 +38,10 @@ parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Def
parser.add_argument('--sleep', help='Time to sleep between containers creation and objects upload (in seconds), '
'Default = 8', default=8)
parser.add_argument('--local', help='Create containers that store data on provided endpoints. Warning: additional empty containers may be created.', action='store_true')
parser.add_argument('--acl', help='Container ACL. Default is public-read-write.', default='public-read-write')
parser.add_argument(
'--rule',
help='Rule attached to created containers. All entries of CONTAINER_ID will be replaced with id of created container.',
action="append")

args: Namespace = parser.parse_args()
print(args)

@@ -56,6 +60,9 @@ def main():
wallet_config = args.config
workers = int(args.workers)
objects_per_container = int(args.preload_obj)
rules = args.rule
if not rules:
rules = DEFAULT_RULES

ignore_errors = args.ignore_errors
if args.update:

@@ -68,7 +75,7 @@ def main():
containers_count = int(args.containers)
print(f"Create containers: {containers_count}")
with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, args.acl, args.local)
containers_runs = [executor.submit(create_container, endpoint, policy, container_creation_retry, wallet, wallet_config, rules, args.local)
for _, endpoint, policy in
zip(range(containers_count), cycle(endpoints), cycle(args.policy))]
@@ -13,6 +13,7 @@ from helpers.aws_cli import create_bucket, upload_object

ERROR_WRONG_CONTAINERS_COUNT = 1
ERROR_WRONG_OBJECTS_COUNT = 2
ERROR_WRONG_PERCENTAGE = 3
MAX_WORKERS = 50
DEFAULT_LOCATION = ""

@@ -26,7 +27,8 @@ parser.add_argument('--endpoint', help='S3 Gateways addresses separated by comma
parser.add_argument('--update', help='True/False, False by default. Save existed buckets from target file (--out). '
'New buckets will not be created.')
parser.add_argument('--location', help=f'AWS location constraint. Default is "{DEFAULT_LOCATION}"', action="append")
parser.add_argument('--versioning', help='True/False, False by default.')
parser.add_argument('--versioning', help='True/False, False by default. Alias of --buckets_versioned=100')
parser.add_argument('--buckets_versioned', help='Percent of versioned buckets. Default is 0', default=0)
parser.add_argument('--ignore-errors', help='Ignore preset errors', action='store_true')
parser.add_argument('--no-verify-ssl', help='Ignore SSL verifications', action='store_true')
parser.add_argument('--workers', help='Count of workers in preset. Max = 50, Default = 50', default=50)

@@ -62,8 +64,17 @@ def main():
print(f"Create buckets: {buckets_count}")

with ProcessPoolExecutor(max_workers=min(MAX_WORKERS, workers)) as executor:
buckets_runs = [executor.submit(create_bucket, endpoint, args.versioning, location, args.acl, no_verify_ssl)
for _, endpoint, location in
if not 0 <= int(args.buckets_versioned) <= 100:
print(f"Percent of versioned buckets must be between 0 and 100: got {args.buckets_versioned}")
if not ignore_errors:
sys.exit(ERROR_WRONG_PERCENTAGE)
if args.versioning == "True":
versioning_per_bucket = [True] * buckets_count
else:
num_versioned_buckets = int((int(args.buckets_versioned) / 100) * buckets_count)
versioning_per_bucket = [True] * num_versioned_buckets + [False] * (buckets_count - num_versioned_buckets)
buckets_runs = [executor.submit(create_bucket, endpoint, versioning_per_bucket[i], location, args.acl, no_verify_ssl)
for i, endpoint, location in
zip(range(buckets_count), cycle(endpoints), cycle(args.location))]

for run in buckets_runs:
@@ -21,6 +21,7 @@ Scenarios `grpc.js`, `local.js`, `http.js` and `s3.js` support the following opt
* `PAYLOAD_TYPE` - type of an object payload ("random" or "text", default: "random").
* `STREAMING` - if set, the payload is generated on the fly and is not read into memory fully.
* `METRIC_TAGS` - custom metrics tags (format `tag1:value1;tag2:value2`).
* `DATE_FORMAT` - custom datetime format: `timeonly` (default), `datetime` or `none`.

Additionally, the profiling extension can be enabled to generate CPU and memory profiles which can be inspected with `go tool pprof file.prof`:
```shell

@@ -125,7 +126,7 @@ The tests will use all pre-created buckets for PUT operations and all pre-create
$ ./scenarios/preset/preset_s3.py --size 1024 --buckets 1 --out s3_1024kb.json --endpoint host1:8084 --preload_obj 500 --location load-1-4
```
* '--location' - specify the name of container policy (from policy.json file). It's important to run 'aws configure' each time when the policy file has been changed to pick up the latest policies.

* '--buckets_versioned' - specify the percentage of versioned buckets from the total number of created buckets. Default is 0
3. Execute scenario with options:

```shell
@@ -138,6 +139,8 @@ Options (in addition to the common options):
* `DELETE_AGE` - age of object in seconds before which it can not be deleted. This parameter can be used to control how many objects we have in the system under load.
* `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations.
* `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
* `OBJ_NAME_LENGTH` - if specified, then name of the object will be generated with the specified length of ASCII characters.
* `DIR_HEIGHT`, `DIR_WIDTH` - if both specified, object name will consist of `DIR_HEIGHT` directories, each of which can have `DIR_WIDTH` subdirectories, for example for `DIR_HEIGHT = 3, DIR_WIDTH = 100`, object names will be `/dir{1...100}/dir{1...100}/dir{1...100}/{uuid || OBJ_NAME}`
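To make the naming options concrete, here is a small sketch that exercises the `generateS3Key()` helper added in this diff (`scenarios/libs/keygen.js`); the environment values are illustrative only:

```js
import { generateS3Key } from './libs/keygen.js';

// Run with e.g.: ./k6 run -e DIR_HEIGHT=3 -e DIR_WIDTH=100 -e OBJ_NAME_LENGTH=8 script.js
// Keys then look like dir42/dir7/dir99/aBcDeFgH. Without DIR_* the key is just the object
// name, and without OBJ_NAME / OBJ_NAME_LENGTH the name falls back to a uuid.
export default function () {
  const key = generateS3Key();
  console.log(key);
}
```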

## S3 Multipart
@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';

import {newGenerator} from './libs/datagen.js';
import {generateS3Key} from './libs/keygen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';
import {newGenerator} from './libs/datagen.js';

parseEnv();

@@ -159,7 +159,7 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE);
}

const key = __ENV.OBJ_NAME || uuidv4();
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

const payload = generator.genPayload();
@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';

import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';

parseEnv();

@@ -177,7 +177,7 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE);
}

const key = __ENV.OBJ_NAME || uuidv4();
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

const payload = generator.genPayload();
@@ -6,10 +6,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';

import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';

parseEnv();

@@ -159,7 +159,7 @@ export function obj_write() {
sleep(__ENV.SLEEP_WRITE);
}

const key = __ENV.OBJ_NAME || uuidv4();
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

const payload = generator.genPayload();
@@ -5,10 +5,10 @@ import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import stats from 'k6/x/frostfs/stats';

import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';
import {uuidv4} from './libs/k6-utils-1.4.0.js';

parseEnv();

@@ -101,7 +101,7 @@ export function obj_write_multipart() {
sleep(__ENV.SLEEP_WRITE);
}

const key = __ENV.OBJ_NAME || uuidv4();
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

const payload = generator.genPayload();
@@ -5,6 +5,7 @@ import registry from 'k6/x/frostfs/registry';
import s3local from 'k6/x/frostfs/s3local';
import stats from 'k6/x/frostfs/stats';

import {generateS3Key} from './libs/keygen.js';
import {newGenerator} from './libs/datagen.js';
import {parseEnv} from './libs/env-parser.js';
import {textSummary} from './libs/k6-summary-0.0.2.js';

@@ -131,7 +132,7 @@ export function handleSummary(data) {
}

export function obj_write() {
const key = __ENV.OBJ_NAME || uuidv4();
const key = generateS3Key();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];

const payload = generator.genPayload();