Compare commits


1 commit

Commit: 0addc11a78
Message: [#13] Allow to use english text in the payload
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
Date: 2023-03-10 10:35:28 +03:00
38 changed files with 125 additions and 3227 deletions

@ -2,7 +2,7 @@
name: Bug report
about: Create a report to help us improve
title: ''
labels: community, triage, bug
labels: community, triage
assignees: ''
---
@ -18,11 +18,8 @@ assignees: ''
If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution
<!-- Not obligatory
If no reason/fix/additions for the bug can be suggested,
uncomment the following phrase:
No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->
<!-- Not obligatory, but suggest a fix/reason for the bug,
or ideas how to implement the addition or change -->
## Steps to Reproduce (for bugs)
<!-- Provide a link to a live example, or an unambiguous set of steps

@ -8,13 +8,13 @@ assignees: ''
---
## Is your feature request related to a problem? Please describe.
<!--- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when ... -->
## Describe the solution you'd like
<!--- A clear and concise description of what you want to happen. -->
<!-- A clear and concise description of what you want to happen. -->
## Describe alternatives you've considered
<!--- A clear and concise description of any alternative solutions or features you've considered. -->
<!-- A clear and concise description of any alternative solutions or features you've considered. -->
## Additional context
<!--- Add any other context or screenshots about the feature request here. -->
<!-- Add any other context or screenshots about the feature request here. -->

@ -69,26 +69,6 @@ const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0)
It returns dictionary with `success` boolean flag, `object_id` string and
`error` string.
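For reference, a minimal k6 script built around this client could look like the sketch below. The endpoint, container ID and payload file are placeholders borrowed from the examples in this repository, and `get(container_id, object_id)` is assumed to mirror the local client's method of the same name.
```js
import native from 'k6/x/frostfs/native';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

const payload = open('../go.sum', 'b');
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "", 0, 0)

export default function () {
    // Placeholder container ID; read the object back only if the put succeeded.
    const container_id = '6BVPPXQewRJ6J5EYmAPLczXxNocS7ikyF7amS2esWQnb';
    const resp = frostfs_cli.put(container_id, {'unique_header': uuidv4()}, payload);
    if (resp.success) {
        frostfs_cli.get(container_id, resp.object_id);
    } else {
        console.log(resp.error);
    }
}
```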
## Local
Create a local client with `connect` method. Arguments:
- local path to frostfs storage node configuration file
- hex encoded private key (empty value produces random key)
- whether to use the debug logger (warning: very verbose)
```js
import local from 'k6/x/frostfs/local';
const local_client = local.connect("/path/to/config.yaml", "", false)
```
### Methods
- `put(container_id, headers, payload)`. Returns dictionary with `success`
boolean flag, `object_id` string, and `error` string.
- `get(container_id, object_id)`. Returns dictionary with `success` boolean
flag, and `error` string.
- `delete(container_id, object_id)`. Returns dictionary with `success` boolean
flag, and `error` string.
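The methods above can be combined in a short k6 script, for example (a sketch; the configuration path and container ID are placeholders):
```js
import local from 'k6/x/frostfs/local';

const payload = open('../go.sum', 'b');
const local_client = local.connect("/path/to/config.yaml", "", false)

export default function () {
    // Placeholder container ID; delete the object again if the put succeeded.
    const container_id = '6BVPPXQewRJ6J5EYmAPLczXxNocS7ikyF7amS2esWQnb';
    const resp = local_client.put(container_id, {'example_header': 'example'}, payload);
    if (resp.success) {
        local_client.delete(container_id, resp.object_id);
    } else {
        console.log(resp.error);
    }
}
```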
## S3
Create s3 client with `connect` method. Arguments:
@ -118,34 +98,9 @@ const s3_cli = s3.connect("http://s3.frostfs.devenv:8080", {'no_verify_ssl': 'tr
- `get(bucket, key)`. Returns dictionary with `success` boolean flag and `error`
string.
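Put and get can be combined as in the following sketch (the endpoint, bucket and key are placeholders):
```js
import s3 from 'k6/x/frostfs/s3';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';

const payload = open('../go.sum', 'b');
const s3_cli = s3.connect("http://s3.frostfs.devenv:8080", {'no_verify_ssl': 'true'})

export default function () {
    // Placeholder bucket; read the object back only if the put succeeded.
    const key = uuidv4();
    if (s3_cli.put("cats", key, payload).success) {
        s3_cli.get("cats", key)
    }
}
```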
## S3 Local
Create local s3 client with `connect` method. Arguments:
- local path to frostfs storage node configuration file
- parameter map with the following options:
* `hex_key`: private key to use as a hexadecimal string. A random one is created if none is provided.
* `node_position`: position of this node in the node array if loading multiple nodes independently (default: 0).
* `node_count`: number of nodes in the node array if loading multiple nodes independently (default: 1).
* `debug_logger`: whether to use the development logger instead of the default. Helpful for debugging (default: false).
- bucket-container mapping, which is needed to resolve the container id for a given bucket name. Any bucket
used by the client must have an entry here.
```js
import local from 'k6/x/frostfs/local';
const params = {'node_position': 1, 'node_count': 3}
const bucketMapping = {'mytestbucket': 'GBQDDUM1hdodXmiRHV57EUkFWJzuntsG8BG15wFSwam6'}
const local_client = local.connect("/path/to/config.yaml", params, bucketMapping)
```
### Methods
- `put(bucket, key, payload)`. Returns dictionary with `success` boolean flag
and `error` string.
- `get(bucket, key)`. Returns dictionary with `success` boolean flag and `error`
string.
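Tying the connect example above to these methods, a minimal sketch might look as follows (the bucket name, container ID and key are placeholders; the bucket must appear in the mapping passed to `connect`):
```js
import s3local from 'k6/x/frostfs/s3local';

const payload = open('../go.sum', 'b');
const params = {'node_position': 1, 'node_count': 3}
const bucketMapping = {'mytestbucket': 'GBQDDUM1hdodXmiRHV57EUkFWJzuntsG8BG15wFSwam6'}
const s3local_cli = s3local.connect("/path/to/config.yaml", params, bucketMapping)

export default function () {
    // 'mytestbucket' resolves through the mapping supplied to connect.
    const key = 'example-key';
    if (s3local_cli.put('mytestbucket', key, payload).success) {
        s3local_cli.get('mytestbucket', key)
    }
}
```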
# Examples
See native protocol and s3 test suite examples in [examples](./examples) dir.
See native protocol and s3 test suit examples in [examples](./examples) dir.
# License

@ -1,24 +0,0 @@
import local from 'k6/x/frostfs/local';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b');
const local_cli = local.connect("/path/to/config.yaml", "", false)
export const options = {
stages: [
{duration: '30s', target: 10},
],
};
export default function () {
let headers = {
'unique_header': uuidv4()
}
const container_id = '6BVPPXQewRJ6J5EYmAPLczXxNocS7ikyF7amS2esWQnb';
let resp = local_cli.put(container_id, headers, payload)
if (resp.success) {
local_cli.get(container_id, resp.object_id)
} else {
console.log(resp.error)
}
}

@ -1,6 +1,6 @@
import {uuidv4} from 'https://jslib.k6.io/k6-utils/1.2.0/index.js';
import {fail} from "k6";
import native from 'k6/x/frostfs/native';
import { fail } from "k6";
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b');
const frostfs_cli = native.connect("s01.frostfs.devenv:8080", "1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb", 0, 0)

@ -1,5 +1,5 @@
import { uuidv4 } from 'https://jslib.k6.io/k6-utils/1.2.0/index.js';
import native from 'k6/x/frostfs/native';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b');
const container = "AjSxSNNXbJUDPqqKYm1VbFVDGCakbpUNH8aGjPmGAH3B"

@ -1,6 +1,6 @@
import {uuidv4} from 'https://jslib.k6.io/k6-utils/1.2.0/index.js';
import {fail} from 'k6'
import s3 from 'k6/x/frostfs/s3';
import { fail } from 'k6'
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const payload = open('../go.sum', 'b');
const bucket = "cats"

@ -1,15 +0,0 @@
import s3local from 'k6/x/frostfs/s3local';
import { uuidv4 } from '../scenarios/libs/k6-utils-1.4.0.js';
const bucket = "testbucket"
const payload = open('../go.sum', 'b');
const s3local_cli = s3local.connect("path/to/storage/config.yml", {}, {
'testbucket': 'GBQDDUM1hdodXmiRHV57EUkFWJzuntsG8BG15wFSwam6',
});
export default function () {
const key = uuidv4();
if (s3local_cli.put(bucket, key, payload).success) {
s3local_cli.get(bucket, key)
}
}

@ -2,13 +2,10 @@ package xk6_frostfs
import (
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/datagen"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/env"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/logging"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/native"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/registry"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/s3"
_ "git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/s3local"
"go.k6.io/k6/js/modules"
)

go.mod

@ -3,32 +3,27 @@ module git.frostfs.info/TrueCloudLab/xk6-frostfs
go 1.17
require (
git.frostfs.info/TrueCloudLab/frostfs-node v0.22.2-0.20230313113918-4e244686cf03
git.frostfs.info/TrueCloudLab/frostfs-s3-gw v0.24.1-0.20230403110435-01afa1cae425
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230329125804-552219b8e130
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230307124721-94476f905599
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
github.com/aws/aws-sdk-go-v2 v1.16.3
github.com/aws/aws-sdk-go-v2/config v1.15.5
github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf
github.com/go-loremipsum/loremipsum v1.1.3
github.com/google/uuid v1.3.0
github.com/joho/godotenv v1.5.1
github.com/nspcc-dev/neo-go v0.101.0
github.com/panjf2000/ants/v2 v2.5.0
github.com/nspcc-dev/neo-go v0.100.1
github.com/sirupsen/logrus v1.8.1
github.com/stretchr/testify v1.8.1
go.etcd.io/bbolt v1.3.6
go.k6.io/k6 v0.38.2
go.uber.org/zap v1.24.0
)
require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230315095236-9dc375346703 // indirect
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230307104236-f69d2ad83c51 // indirect
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/hrw v1.2.0 // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 // indirect
github.com/aws/aws-sdk-go v1.44.6 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4 // indirect
@ -43,65 +38,34 @@ require (
github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 // indirect
github.com/aws/smithy-go v1.11.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bluele/gcache v0.0.2 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-sourcemap/sourcemap v2.1.4-0.20211119122758-180fcef48034+incompatible // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/klauspost/compress v1.15.13 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/minio/highwayhash v1.0.2 // indirect
github.com/minio/sio v0.3.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/nats-io/jwt/v2 v2.4.1 // indirect
github.com/nats-io/nats.go v1.22.1 // indirect
github.com/nats-io/nkeys v0.4.4 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.13.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.15.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.7.0 // indirect
github.com/spf13/afero v1.1.2 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/exp v0.0.0-20221227203929-1b447090c38c // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.1.0 // indirect
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
google.golang.org/grpc v1.52.0 // indirect
golang.org/x/net v0.3.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
google.golang.org/genproto v0.0.0-20200903010400-9bfcb5116336 // indirect
google.golang.org/grpc v1.48.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/guregu/null.v3 v3.3.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum

File diff suppressed because it is too large.

@ -1,6 +1,8 @@
package datagen
import (
"strings"
"go.k6.io/k6/js/modules"
)
@ -36,7 +38,7 @@ func (d *Datagen) Exports() modules.Exports {
return modules.Exports{Default: d}
}
func (d *Datagen) Generator(size int) *Generator {
g := NewGenerator(d.vu, size)
func (d *Datagen) Generator(size int, typ string) *Generator {
g := NewGenerator(d.vu, size, strings.ToLower(typ))
return &g
}
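On the k6 side, the new second argument selects the payload type when constructing a generator; a sketch with illustrative size and type values:
```js
import datagen from 'k6/x/frostfs/datagen';

// 'text' fills the buffer with lorem-ipsum paragraphs; unknown types fall back to random bytes.
const generator = datagen.generator(4 * 1024, "text");

export default function () {
    // The resulting payload can be passed to any client's put() as before.
    const { payload } = generator.genPayload(false);
}
```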

@ -1,12 +1,14 @@
package datagen
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"math/rand"
"time"
"github.com/dop251/goja"
"github.com/go-loremipsum/loremipsum"
"go.k6.io/k6/js/modules"
)
@ -24,6 +26,7 @@ type (
size int
rand *rand.Rand
buf []byte
typ string
offset int
}
@ -36,20 +39,33 @@ type (
// TailSize specifies number of extra random bytes in the buffer tail.
const TailSize = 1024
func NewGenerator(vu modules.VU, size int) Generator {
var payloadTypes = []string{
"text",
"random",
"",
}
func NewGenerator(vu modules.VU, size int, typ string) Generator {
if size <= 0 {
panic("size should be positive")
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
buf := make([]byte, size+TailSize)
r.Read(buf)
var found bool
for i := range payloadTypes {
if payloadTypes[i] == typ {
found = true
}
}
if !found {
vu.InitEnv().Logger.Info("Unknown payload type '%s', random will be used.", typ)
}
return Generator{
vu: vu,
size: size,
rand: r,
buf: buf,
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
buf: make([]byte, size+TailSize),
typ: typ,
}
return Generator{vu: vu, size: size, buf: nil, typ: typ, offset: 0}
}
func (g *Generator) GenPayload(calcHash bool) GenPayloadResponse {
@ -66,9 +82,20 @@ func (g *Generator) GenPayload(calcHash bool) GenPayloadResponse {
}
func (g *Generator) nextSlice() []byte {
if g.offset >= TailSize {
g.offset = 0
g.rand.Read(g.buf) // Per docs, err is always nil here
if g.offset+g.size >= len(g.buf) {
switch g.typ {
case "text":
li := loremipsum.New()
b := bytes.NewBuffer(g.buf[:0])
for b.Len() < g.size+TailSize {
b.WriteString(li.Paragraph())
b.WriteRune('\n')
}
g.buf = b.Bytes()
default:
g.offset = 0
g.rand.Read(g.buf) // Per docs, err is always nil here
}
}
result := g.buf[g.offset : g.offset+g.size]

@ -16,25 +16,25 @@ func TestGenerator(t *testing.T) {
t.Run("fails on negative size", func(t *testing.T) {
require.Panics(t, func() {
_ = NewGenerator(vu, -1)
_ = NewGenerator(vu, -1, "")
})
})
t.Run("fails on zero size", func(t *testing.T) {
require.Panics(t, func() {
_ = NewGenerator(vu, 0)
_ = NewGenerator(vu, 0, "")
})
})
t.Run("creates slice of specified size", func(t *testing.T) {
size := 10
g := NewGenerator(vu, size)
g := NewGenerator(vu, size, "")
slice := g.nextSlice()
require.Len(t, slice, size)
})
t.Run("creates a different slice on each call", func(t *testing.T) {
g := NewGenerator(vu, 1000)
g := NewGenerator(vu, 1000, "")
slice1 := g.nextSlice()
slice2 := g.nextSlice()
// Each slice should be unique (assuming that 1000 random bytes will never coincide
@ -43,7 +43,7 @@ func TestGenerator(t *testing.T) {
})
t.Run("keeps generating slices after consuming entire tail", func(t *testing.T) {
g := NewGenerator(vu, 1000)
g := NewGenerator(vu, 1000, "")
initialSlice := g.nextSlice()
for i := 0; i < TailSize; i++ {
g.nextSlice()

@ -1,50 +0,0 @@
package env
import (
"os"
"github.com/joho/godotenv"
"go.k6.io/k6/js/modules"
)
// RootModule is the global module object type. It is instantiated once per test
// run and will be used to create k6/x/frostfs/registry module instances for each VU.
type RootModule struct{}
// Parser represents an instance of the module for every VU.
type Parser struct {
vu modules.VU
}
// Ensure the interfaces are implemented correctly.
var (
_ modules.Instance = &Parser{}
_ modules.Module = &RootModule{}
)
func init() {
modules.Register("k6/x/frostfs/env", new(RootModule))
}
// NewModuleInstance implements the modules.Module interface and returns
// a new instance for each VU.
func (r *RootModule) NewModuleInstance(vu modules.VU) modules.Instance {
mi := &Parser{vu: vu}
return mi
}
// Exports implements the modules.Instance interface and returns the exports
// of the JS module.
func (p *Parser) Exports() modules.Exports {
return modules.Exports{Default: p}
}
func (p *Parser) Parse(fileName string) (map[string]string, error) {
f, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer f.Close()
return godotenv.Parse(f)
}

@ -1,73 +0,0 @@
package local
import (
"fmt"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
"github.com/dop251/goja"
)
type Client struct {
rc *rawclient.RawClient
}
type (
SuccessOrErrorResponse struct {
Success bool
Error string
}
PutResponse struct {
Success bool
ObjectID string
Error string
}
GetResponse SuccessOrErrorResponse
DeleteResponse SuccessOrErrorResponse
)
func (c *Client) Put(containerID string, headers map[string]string, payload goja.ArrayBuffer) PutResponse {
id, err := c.rc.Put(mustParseContainerID(containerID), nil, headers, payload.Bytes())
if err != nil {
return PutResponse{Error: err.Error()}
}
return PutResponse{
Success: true,
ObjectID: id.EncodeToString(),
}
}
func (c *Client) Get(containerID, objectID string) GetResponse {
if _, err := c.rc.Get(mustParseContainerID(containerID), mustParseObjectID(objectID)); err != nil {
return GetResponse{Error: err.Error()}
}
return GetResponse{Success: true}
}
func (c *Client) Delete(containerID, objectID string) DeleteResponse {
if err := c.rc.Delete(mustParseContainerID(containerID), mustParseObjectID(objectID)); err != nil {
return DeleteResponse{Error: err.Error()}
}
return DeleteResponse{Success: true}
}
func mustParseContainerID(strContainerID string) cid.ID {
var containerID cid.ID
err := containerID.DecodeString(strContainerID)
if err != nil {
panic(fmt.Sprintf("parsing container id %q: %v", strContainerID, err))
}
return containerID
}
func mustParseObjectID(strObjectID string) oid.ID {
var cliObjectID oid.ID
err := cliObjectID.DecodeString(strObjectID)
if err != nil {
panic(fmt.Sprintf("parsing object id %q: %v", strObjectID, err))
}
return cliObjectID
}

@ -1,367 +0,0 @@
package local
import (
"context"
"errors"
"fmt"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
metabase "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"go.etcd.io/bbolt"
"go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics"
"go.uber.org/zap"
"golang.org/x/sys/unix"
)
// RootModule is the global module object type. It is instantiated once per test
// run and will be used to create k6/x/frostfs/local module instances for each VU.
type RootModule struct {
mu sync.Mutex
// configFile is the name of the configuration file used during one test.
configFile string
// ng is the engine instance used during one test, corresponding to the configFile. Each VU
// gets the same engine instance.
ng *engine.StorageEngine
}
// Local represents an instance of the module for every VU.
type Local struct {
vu modules.VU
ResolveEngine func(context.Context, string, bool) (*engine.StorageEngine, error)
}
// Ensure the interfaces are implemented correctly.
var (
_ modules.Module = &RootModule{}
_ modules.Instance = &Local{}
objPutTotal, objPutFails, objPutDuration *metrics.Metric
objGetTotal, objGetFails, objGetDuration *metrics.Metric
objDeleteTotal, objDeleteFails, objDeleteDuration *metrics.Metric
)
func init() {
modules.Register("k6/x/frostfs/local", &RootModule{})
}
// NewModuleInstance implements the modules.Module interface and returns
// a new instance for each VU.
func (r *RootModule) NewModuleInstance(vu modules.VU) modules.Instance {
return NewLocalModuleInstance(vu, r.GetOrCreateEngine)
}
func NewLocalModuleInstance(vu modules.VU, resolveEngine func(context.Context, string, bool) (*engine.StorageEngine, error)) *Local {
return &Local{
vu: vu,
ResolveEngine: resolveEngine,
}
}
// checkResourceLimits checks the current limit on NOFILE.
//
// The usual default is around 1024 and this is too low for production clusters where a value of
// about 65536 is needed in order to not run into errors because of attempting to open too many files.
// This is needed for the local storage engine scenarios, where the user running the scenario is not
// necessarily the service user, for which the limits are preconfigured correctly.
//
// See: https://k6.io/docs/misc/fine-tuning-os/
func checkResourceLimits() error {
const (
minNofileLimit = 1 << 16
)
rlimit := &unix.Rlimit{}
if err := unix.Getrlimit(unix.RLIMIT_NOFILE, rlimit); err != nil {
return fmt.Errorf("getting resource limits: %v", err)
}
if rlimit.Cur < minNofileLimit {
return fmt.Errorf("nofile limit is too low: %d", rlimit.Cur)
}
return nil
}
// GetOrCreateEngine returns the current engine instance for the given configuration file,
// creating a new one if none exists. Note that the identity of configuration files is their
// file name for the purposes of test runs.
func (r *RootModule) GetOrCreateEngine(ctx context.Context, configFile string, debug bool) (*engine.StorageEngine, error) {
r.mu.Lock()
defer r.mu.Unlock()
if len(configFile) == 0 {
return nil, errors.New("configFile cannot be empty")
}
// Create and initialize engine for the given configFile if it doesn't exist already
if r.ng == nil {
r.configFile = configFile
appCfg := config.New(config.Prm{}, config.WithConfigFile(configFile))
ngOpts, shardOpts, err := storageEngineOptionsFromConfig(appCfg, debug)
if err != nil {
return nil, fmt.Errorf("creating engine options from config: %v", err)
}
if err := checkResourceLimits(); err != nil {
return nil, err
}
r.ng = engine.New(ngOpts...)
for i, opts := range shardOpts {
if _, err := r.ng.AddShard(opts...); err != nil {
return nil, fmt.Errorf("adding shard %d: %v", i, err)
}
}
if err := r.ng.Open(); err != nil {
return nil, fmt.Errorf("opening engine: %v", err)
}
if err := r.ng.Init(); err != nil {
return nil, fmt.Errorf("initializing engine: %v", err)
}
} else if configFile != r.configFile {
return nil, fmt.Errorf("GetOrCreateEngine called with mismatching configFile after engine was initialized: got %q, want %q", configFile, r.configFile)
}
return r.ng, nil
}
// Exports implements the modules.Instance interface and returns the exports
// of the JS module.
func (s *Local) Exports() modules.Exports {
return modules.Exports{Default: s}
}
func (s *Local) VU() modules.VU { return s.vu }
func (s *Local) Connect(configFile, hexKey string, debug bool) (*Client, error) {
ng, err := s.ResolveEngine(s.VU().Context(), configFile, debug)
if err != nil {
return nil, fmt.Errorf("connecting to engine for config %q: %v", configFile, err)
}
key, err := ParseOrCreateKey(hexKey)
if err != nil {
return nil, fmt.Errorf("creating key: %v", err)
}
// Register metrics.
registry := metrics.NewRegistry()
objPutTotal, _ = registry.NewMetric("local_obj_put_total", metrics.Counter)
objPutFails, _ = registry.NewMetric("local_obj_put_fails", metrics.Counter)
objPutDuration, _ = registry.NewMetric("local_obj_put_duration", metrics.Trend, metrics.Time)
objGetTotal, _ = registry.NewMetric("local_obj_get_total", metrics.Counter)
objGetFails, _ = registry.NewMetric("local_obj_get_fails", metrics.Counter)
objGetDuration, _ = registry.NewMetric("local_obj_get_duration", metrics.Trend, metrics.Time)
objDeleteTotal, _ = registry.NewMetric("local_obj_delete_total", metrics.Counter)
objDeleteFails, _ = registry.NewMetric("local_obj_delete_fails", metrics.Counter)
objDeleteDuration, _ = registry.NewMetric("local_obj_delete_duration", metrics.Trend, metrics.Time)
// Create raw client backed by local storage engine.
rc := rawclient.New(ng,
rawclient.WithKey(key.PrivateKey),
rawclient.WithPutHandler(func(sz uint64, err error, dt time.Duration) {
if err != nil {
stats.Report(s.vu, objPutFails, 1)
} else {
stats.Report(s.vu, objPutTotal, 1)
stats.ReportDataSent(s.vu, float64(sz))
stats.Report(s.vu, objPutDuration, metrics.D(dt))
}
}),
rawclient.WithGetHandler(func(sz uint64, err error, dt time.Duration) {
if err != nil {
stats.Report(s.vu, objGetFails, 1)
} else {
stats.Report(s.vu, objGetTotal, 1)
stats.Report(s.vu, objGetDuration, metrics.D(dt))
stats.ReportDataReceived(s.vu, float64(sz))
}
}),
rawclient.WithDeleteHandler(func(err error, dt time.Duration) {
if err != nil {
stats.Report(s.vu, objDeleteFails, 1)
} else {
stats.Report(s.vu, objDeleteTotal, 1)
stats.Report(s.vu, objDeleteDuration, metrics.D(dt))
}
}),
)
return &Client{rc}, nil
}
type epochState struct{}
func (epochState) CurrentEpoch() uint64 { return 0 }
// storageEngineOptionsFromConfig loads a configuration file and returns the corresponding
// engine and shard options to recreate an engine usable with an existing storage instance.
// This makes sure that the local loader uses the same engine configuration as the one that
// preloaded the storage (if any), by using the same configuration file.
//
// Note that the configuration file only needs to contain the storage-specific sections.
func storageEngineOptionsFromConfig(c *config.Config, debug bool) ([]engine.Option, [][]shard.Option, error) {
log := zap.L()
if debug {
var err error
log, err = zap.NewDevelopment()
if err != nil {
return nil, nil, fmt.Errorf("creating development logger: %v", err)
}
}
ngOpts := []engine.Option{
engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
engine.WithShardPoolSize(engineconfig.ShardPoolSize(c)),
engine.WithLogger(&logger.Logger{Logger: log}),
}
var shOpts [][]shard.Option
engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
opts := []shard.Option{
shard.WithRefillMetabase(sc.RefillMetabase()),
shard.WithMode(sc.Mode()),
shard.WithLogger(&logger.Logger{Logger: log}),
}
// substorages
{
var substorages []blobstor.SubStorage
for _, scfg := range sc.BlobStor().Storages() {
switch scfg.Type() {
case blobovniczatree.Type:
cfg := blobovniczaconfig.From((*config.Config)(scfg))
ss := blobstor.SubStorage{
Storage: blobovniczatree.NewBlobovniczaTree(
blobovniczatree.WithRootPath(scfg.Path()),
blobovniczatree.WithPermissions(scfg.Perm()),
blobovniczatree.WithBlobovniczaSize(cfg.Size()),
blobovniczatree.WithBlobovniczaShallowDepth(cfg.ShallowDepth()),
blobovniczatree.WithBlobovniczaShallowWidth(cfg.ShallowWidth()),
blobovniczatree.WithOpenedCacheSize(cfg.OpenedCacheSize()),
blobovniczatree.WithLogger(&logger.Logger{Logger: log}),
),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) < sc.SmallSizeLimit()
},
}
substorages = append(substorages, ss)
case fstree.Type:
cfg := fstreeconfig.From((*config.Config)(scfg))
ss := blobstor.SubStorage{
Storage: fstree.New(
fstree.WithPath(scfg.Path()),
fstree.WithPerm(scfg.Perm()),
fstree.WithDepth(cfg.Depth()),
fstree.WithNoSync(cfg.NoSync()),
),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return true
},
}
substorages = append(substorages, ss)
default:
return fmt.Errorf("invalid storage type: %s", scfg.Type())
}
}
opts = append(opts, shard.WithBlobStorOptions(
blobstor.WithCompressObjects(sc.Compress()),
blobstor.WithUncompressableContentTypes(sc.UncompressableContentTypes()),
blobstor.WithStorages(substorages),
blobstor.WithLogger(&logger.Logger{Logger: log}),
))
}
// write cache
if wc := sc.WriteCache(); wc.Enabled() {
opts = append(opts, shard.WithWriteCacheOptions(
writecache.WithPath(wc.Path()),
writecache.WithMaxBatchSize(wc.BoltDB().MaxBatchSize()),
writecache.WithMaxBatchDelay(wc.BoltDB().MaxBatchDelay()),
writecache.WithMaxObjectSize(wc.MaxObjectSize()),
writecache.WithSmallObjectSize(wc.SmallObjectSize()),
writecache.WithFlushWorkersCount(wc.WorkersNumber()),
writecache.WithMaxCacheSize(wc.SizeLimit()),
writecache.WithNoSync(wc.NoSync()),
writecache.WithLogger(&logger.Logger{Logger: log}),
))
}
// tree
if config.BoolSafe(c.Sub("tree"), "enabled") {
pr := sc.Pilorama()
opts = append(opts, shard.WithPiloramaOptions(
pilorama.WithPath(pr.Path()),
pilorama.WithPerm(pr.Perm()),
pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
pilorama.WithNoSync(pr.NoSync()),
))
}
// metabase
{
mb := sc.Metabase()
opts = append(opts, shard.WithMetaBaseOptions(
metabase.WithPath(mb.Path()),
metabase.WithPermissions(mb.BoltDB().Perm()),
metabase.WithMaxBatchSize(mb.BoltDB().MaxBatchSize()),
metabase.WithMaxBatchDelay(mb.BoltDB().MaxBatchDelay()),
metabase.WithBoltDBOptions(&bbolt.Options{
Timeout: 1 * time.Second,
}),
metabase.WithEpochState(epochState{}),
metabase.WithLogger(&logger.Logger{Logger: log}),
))
}
// GC
{
gc := sc.GC()
opts = append(opts,
shard.WithGCRemoverSleepInterval(gc.RemoverSleepInterval()),
shard.WithRemoverBatchSize(gc.RemoverBatchSize()),
shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
pool, err := ants.NewPool(sz)
if err != nil {
panic(err)
}
return pool
}),
)
}
shOpts = append(shOpts, opts)
return nil
})
return ngOpts, shOpts, nil
}
// ParseOrCreateKey parses the provided key as a hex string or creates a fresh one if empty.
func ParseOrCreateKey(hexKeyStr string) (*keys.PrivateKey, error) {
if hexKeyStr != "" {
return keys.NewPrivateKeyFromHex(hexKeyStr)
}
return keys.NewPrivateKey()
}

@ -1,46 +0,0 @@
package rawclient
import (
"crypto/ecdsa"
"time"
)
type (
PutHandler func(uint64, error, time.Duration)
GetHandler func(uint64, error, time.Duration)
DeleteHandler func(error, time.Duration)
)
type config struct {
key ecdsa.PrivateKey
onPut PutHandler
onGet GetHandler
onDelete DeleteHandler
}
type Option func(*config)
func defaultConfig() *config {
return &config{
onPut: func(uint64, error, time.Duration) {},
onGet: func(uint64, error, time.Duration) {},
onDelete: func(error, time.Duration) {},
}
}
// WithKey sets the private key used by the raw client if no other key
// is available when setting owner IDs.
// Required.
func WithKey(key ecdsa.PrivateKey) Option { return func(c *config) { c.key = key } }
// WithPutHandler sets the hook invoked on completion of Put calls.
// This is useful for updating metrics or debugging.
func WithPutHandler(h PutHandler) Option { return func(c *config) { c.onPut = h } }
// WithGetHandler sets the hook invoked on completion of Get calls.
// This is useful for updating metrics or debugging.
func WithGetHandler(h GetHandler) Option { return func(c *config) { c.onGet = h } }
// WithDeleteHandler sets the hook invoked on completion of Delete calls.
// This is useful for updating metrics or debugging.
func WithDeleteHandler(h DeleteHandler) Option { return func(c *config) { c.onDelete = h } }

@ -1,120 +0,0 @@
// Package rawclient provides a basic interface to the local storage engine.
// It can be used as a base for more complex load clients backed by local storage.
package rawclient
import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
// RawClient is a client to the local storage engine instance.
type RawClient struct {
*config
ng *engine.StorageEngine
ownerID *user.ID
}
// New returns a RawClient from the provided options.
func New(ng *engine.StorageEngine, opts ...Option) *RawClient {
cfg := defaultConfig()
for _, opt := range opts {
opt(cfg)
}
client := &RawClient{cfg, ng, &user.ID{}}
user.IDFromKey(client.ownerID, client.key.PublicKey)
return client
}
func (c *RawClient) Put(containerID cid.ID, ownerID *user.ID, headers map[string]string, payload []byte) (oid.ID, error) {
sz := len(payload)
attrs := make([]object.Attribute, len(headers))
{
ind := 0
for k, v := range headers {
attrs[ind].SetKey(k)
attrs[ind].SetValue(v)
ind++
}
}
// Note that the key is a required option, so this is never empty.
if ownerID == nil {
ownerID = c.ownerID
}
obj := object.New()
obj.SetContainerID(containerID)
obj.SetOwnerID(ownerID)
obj.SetAttributes(attrs...)
obj.SetPayload(payload)
obj.SetPayloadSize(uint64(sz))
object.CalculateAndSetPayloadChecksum(obj) // needed for metabase key
id, err := object.CalculateID(obj)
if err != nil {
return oid.ID{}, fmt.Errorf("calculating object id: %v", err)
}
obj.SetID(id)
if err := object.CalculateAndSetSignature(c.key, obj); err != nil {
return oid.ID{}, fmt.Errorf("calculating signature: %v", err)
}
var req engine.PutPrm
req.WithObject(obj)
start := time.Now()
_, err = c.ng.Put(req)
c.onPut(uint64(sz), err, time.Since(start))
if err != nil {
return oid.ID{}, err
}
return id, nil
}
func (c *RawClient) Get(containerID cid.ID, objectID oid.ID) (*object.Object, error) {
var addr oid.Address
addr.SetContainer(containerID)
addr.SetObject(objectID)
var req engine.GetPrm
req.WithAddress(addr)
start := time.Now()
res, err := c.ng.Get(req)
var sz uint64
obj := res.Object()
if obj != nil {
sz = uint64(len(obj.Payload()))
}
c.onGet(sz, err, time.Since(start))
return obj, nil
}
func (c *RawClient) Delete(containerID cid.ID, objectID oid.ID) error {
var addr oid.Address
addr.SetContainer(containerID)
addr.SetObject(objectID)
var req engine.DeletePrm
req.WithAddress(addr)
start := time.Now()
_, err := c.ng.Delete(req)
c.onDelete(err, time.Since(start))
return err
}
func (c *RawClient) OwnerID() *user.ID {
return c.ownerID
}

@ -1,120 +0,0 @@
package s3local
import (
"bytes"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"github.com/dop251/goja"
"go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics"
)
type Client struct {
vu modules.VU
l layer.Client
ownerID *user.ID
resolver layer.BucketResolver
}
type (
SuccessOrErrorResponse struct {
Success bool
Error string
}
CreateBucketResponse SuccessOrErrorResponse
PutResponse SuccessOrErrorResponse
DeleteResponse SuccessOrErrorResponse
GetResponse SuccessOrErrorResponse
)
func (c *Client) Put(bucket, key string, payload goja.ArrayBuffer) PutResponse {
cid, err := c.resolver.Resolve(c.vu.Context(), bucket)
if err != nil {
stats.Report(c.vu, objPutFails, 1)
return PutResponse{Error: err.Error()}
}
prm := &layer.PutObjectParams{
BktInfo: &data.BucketInfo{
Name: bucket,
CID: cid,
Owner: *c.ownerID,
Created: time.Now(),
},
Header: map[string]string{},
Object: key,
Size: int64(len(payload.Bytes())),
Reader: bytes.NewReader(payload.Bytes()),
}
start := time.Now()
if _, err := c.l.PutObject(c.vu.Context(), prm); err != nil {
stats.Report(c.vu, objPutFails, 1)
return PutResponse{Error: err.Error()}
}
stats.Report(c.vu, objPutDuration, metrics.D(time.Since(start)))
stats.Report(c.vu, objPutTotal, 1)
stats.ReportDataSent(c.vu, float64(prm.Size))
return PutResponse{Success: true}
}
func (c *Client) Get(bucket, key string) GetResponse {
cid, err := c.resolver.Resolve(c.vu.Context(), bucket)
if err != nil {
stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()}
}
start := time.Now()
bktInfo := &data.BucketInfo{
Name: bucket,
CID: cid,
Owner: *c.ownerID,
}
headPrm := &layer.HeadObjectParams{
BktInfo: bktInfo,
Object: key,
}
extInfo, err := c.l.GetExtendedObjectInfo(c.vu.Context(), headPrm)
if err != nil {
stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()}
}
wr := &recvDataReporter{}
getPrm := &layer.GetObjectParams{
BucketInfo: bktInfo,
ObjectInfo: extInfo.ObjectInfo,
Range: &layer.RangeParams{
Start: 0,
End: uint64(extInfo.ObjectInfo.Size),
},
Writer: wr,
}
if err := c.l.GetObject(c.vu.Context(), getPrm); err != nil {
stats.Report(c.vu, objGetFails, 1)
return GetResponse{Error: err.Error()}
}
stats.Report(c.vu, objGetDuration, metrics.D(time.Since(start)))
stats.Report(c.vu, objGetTotal, 1)
stats.ReportDataReceived(c.vu, wr.total)
return GetResponse{Success: true}
}
type recvDataReporter struct{ total float64 }
func (r *recvDataReporter) Write(p []byte) (int, error) {
r.total += float64(len(p))
return len(p), nil
}

@ -1,90 +0,0 @@
package s3local
import (
"bytes"
"context"
"fmt"
"io"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
)
// frostfs implements the subset of layer.FrostFS needed for clients
// backed by local storage engines. Attempting to call any of the
// unimplemented methods panics.
type frostfs struct {
*rawclient.RawClient
}
func unimplementedMessage(fname string) string {
return fmt.Sprintf("layer.FrostFS.%s is unimplemented and should not be called. If you are seeing "+
"this error, it probably means you tried to use the s3local scenario for "+
"something other than filling a cluster (i.e. PUT or GET).", fname)
}
func (*frostfs) CreateContainer(context.Context, layer.PrmContainerCreate) (cid.ID, error) {
panic(unimplementedMessage("CreateContainer"))
}
func (*frostfs) Container(context.Context, cid.ID) (*container.Container, error) {
panic(unimplementedMessage("Container"))
}
func (*frostfs) UserContainers(context.Context, user.ID) ([]cid.ID, error) {
panic(unimplementedMessage("UserContainers"))
}
func (*frostfs) SetContainerEACL(context.Context, eacl.Table, *session.Container) error {
panic(unimplementedMessage("SetContainerEACL"))
}
func (*frostfs) ContainerEACL(context.Context, cid.ID) (*eacl.Table, error) {
panic(unimplementedMessage("ContainerEACL"))
}
func (*frostfs) DeleteContainer(context.Context, cid.ID, *session.Container) error {
panic(unimplementedMessage("DeleteContainer"))
}
func (f *frostfs) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*layer.ObjectPart, error) {
obj, err := f.Get(prm.Container, prm.Object)
if err != nil {
return nil, err
}
part := &layer.ObjectPart{}
if prm.WithHeader {
part.Head = obj
}
if prm.WithPayload {
part.Payload = io.NopCloser(bytes.NewReader(obj.Payload()))
}
return part, nil
}
func (f *frostfs) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (oid.ID, error) {
payload, err := io.ReadAll(prm.Payload)
if err != nil {
return oid.ID{}, fmt.Errorf("reading payload: %v", err)
}
hdrs := map[string]string{}
for _, attr := range prm.Attributes {
hdrs[attr[0]] = attr[1]
}
return f.Put(prm.Container, &prm.Creator, hdrs, payload)
}
func (f *frostfs) DeleteObject(context.Context, layer.PrmObjectDelete) error {
panic(unimplementedMessage("DeleteObject"))
}
func (f *frostfs) TimeToEpoch(ctx context.Context, now time.Time, future time.Time) (uint64, uint64, error) {
panic(unimplementedMessage("TimeToEpoch"))
}

@ -1,167 +0,0 @@
package s3local
import (
"context"
"flag"
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/local/rawclient"
"git.frostfs.info/TrueCloudLab/xk6-frostfs/internal/stats"
"go.k6.io/k6/js/modules"
"go.k6.io/k6/metrics"
"go.uber.org/zap"
)
// RootModule is the global module object type. It is instantiated once per test
// run and will be used to create k6/x/frostfs/s3local module instances for each VU.
type RootModule struct {
m *local.RootModule
}
// Local represents an instance of the module for every VU.
type Local struct {
l *local.Local
}
// Ensure the interfaces are implemented correctly.
var (
_ modules.Module = &RootModule{}
_ modules.Instance = &Local{}
internalObjPutTotal, internalObjPutFails, internalObjPutDuration *metrics.Metric
internalObjGetTotal, internalObjGetFails, internalObjGetDuration *metrics.Metric
objPutTotal, objPutFails, objPutDuration *metrics.Metric
objGetTotal, objGetFails, objGetDuration *metrics.Metric
)
func init() {
modules.Register("k6/x/frostfs/s3local", &RootModule{
m: &local.RootModule{},
})
}
// NewModuleInstance implements the modules.Module interface and returns
// a new instance for each VU.
func (r *RootModule) NewModuleInstance(vu modules.VU) modules.Instance {
return &Local{local.NewLocalModuleInstance(vu, r.m.GetOrCreateEngine)}
}
// Exports implements the modules.Instance interface and returns the exports
// of the JS module.
func (s *Local) Exports() modules.Exports {
return modules.Exports{Default: s}
}
func (s *Local) Connect(configFile string, params map[string]string, bucketMapping map[string]string) (*Client, error) {
// Parse configuration flags.
fs := flag.NewFlagSet("s3local", flag.ContinueOnError)
hexKey := fs.String("hex_key", "", "Private key to use as a hexadecimal string. A random one is created if none is provided")
nodePosition := fs.Int("node_position", 0, "Position of this node in the node array if loading multiple nodes independently")
nodeCount := fs.Int("node_count", 1, "Number of nodes in the node array if loading multiple nodes independently")
debugLogger := fs.Bool("debug_logger", false, "Whether to use the development logger instead of the default one for debugging purposes")
{
args := make([]string, 0, len(params))
for k, v := range params {
args = append(args, fmt.Sprintf("-%s=%s", k, v))
}
if err := fs.Parse(args); err != nil {
return nil, fmt.Errorf("parsing parameters: %v", err)
}
}
// Validate and read configuration flags.
key, err := local.ParseOrCreateKey(*hexKey)
if err != nil {
return nil, fmt.Errorf("parsing hex_key: %v", err)
}
if *nodeCount <= 0 {
return nil, fmt.Errorf("node_count must be positive")
}
if *nodePosition < 0 || *nodePosition >= *nodeCount {
return nil, fmt.Errorf("node_position must be in the range [0, node_count-1]")
}
// Register metrics.
registry := metrics.NewRegistry()
internalObjPutTotal, _ = registry.NewMetric("s3local_internal_obj_put_total", metrics.Counter)
internalObjPutFails, _ = registry.NewMetric("s3local_internal_obj_put_fails", metrics.Counter)
internalObjPutDuration, _ = registry.NewMetric("s3local_internal_obj_put_duration", metrics.Trend, metrics.Time)
internalObjGetTotal, _ = registry.NewMetric("s3local_internal_obj_get_total", metrics.Counter)
internalObjGetFails, _ = registry.NewMetric("s3local_internal_obj_get_fails", metrics.Counter)
internalObjGetDuration, _ = registry.NewMetric("s3local_internal_obj_get_duration", metrics.Trend, metrics.Time)
objPutTotal, _ = registry.NewMetric("s3local_obj_put_total", metrics.Counter)
objPutFails, _ = registry.NewMetric("s3local_obj_put_fails", metrics.Counter)
objPutDuration, _ = registry.NewMetric("s3local_obj_put_duration", metrics.Trend, metrics.Time)
objGetTotal, _ = registry.NewMetric("s3local_obj_get_total", metrics.Counter)
objGetFails, _ = registry.NewMetric("s3local_obj_get_fails", metrics.Counter)
objGetDuration, _ = registry.NewMetric("s3local_obj_get_duration", metrics.Trend, metrics.Time)
// Create S3 layer backed by local storage engine and tree service.
ng, err := s.l.ResolveEngine(s.l.VU().Context(), configFile, *debugLogger)
if err != nil {
return nil, fmt.Errorf("connecting to engine for config %q: %v", configFile, err)
}
treeSvc := tree.NewTree(treeServiceEngineWrapper{
ng: ng,
pos: *nodePosition,
size: *nodeCount,
})
rc := rawclient.New(ng,
rawclient.WithKey(key.PrivateKey),
rawclient.WithPutHandler(func(sz uint64, err error, dt time.Duration) {
if err != nil {
stats.Report(s.l.VU(), internalObjPutFails, 1)
} else {
stats.Report(s.l.VU(), internalObjPutTotal, 1)
stats.Report(s.l.VU(), internalObjPutDuration, metrics.D(dt))
}
}),
rawclient.WithGetHandler(func(sz uint64, err error, dt time.Duration) {
if err != nil {
stats.Report(s.l.VU(), internalObjGetFails, 1)
} else {
stats.Report(s.l.VU(), internalObjGetTotal, 1)
stats.Report(s.l.VU(), internalObjGetDuration, metrics.D(dt))
}
}),
)
resolver, err := newFixedBucketResolver(bucketMapping)
if err != nil {
return nil, fmt.Errorf("creating bucket resolver: %v", err)
}
cfg := &layer.Config{
Caches: layer.DefaultCachesConfigs(zap.L()),
AnonKey: layer.AnonymousKey{Key: key},
Resolver: resolver,
TreeService: treeSvc,
}
l := layer.NewLayer(zap.L(), &frostfs{rc}, cfg)
l.Initialize(s.l.VU().Context(), nopEventListener{})
return &Client{
vu: s.l.VU(),
l: l,
ownerID: rc.OwnerID(),
resolver: resolver,
}, nil
}
type nopEventListener struct{}
func (nopEventListener) Subscribe(context.Context, string, layer.MsgHandler) error { return nil }
func (nopEventListener) Listen(context.Context) {}

@ -1,32 +0,0 @@
package s3local
import (
"context"
"fmt"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// fixedBucketResolver is a static bucket resolver from the provided map.
// This is needed to replace the normal resolver for local storage engine clients, since
// those should not use DNS or NNS for resolution.
type fixedBucketResolver map[string]cid.ID
func newFixedBucketResolver(bucketMapping map[string]string) (fixedBucketResolver, error) {
r := fixedBucketResolver{}
for bucket, cidStr := range bucketMapping {
var id cid.ID
if err := id.DecodeString(cidStr); err != nil {
return nil, fmt.Errorf("decoding container id %q: %v", cidStr, err)
}
r[bucket] = id
}
return r, nil
}
func (r fixedBucketResolver) Resolve(_ context.Context, bucket string) (cid.ID, error) {
if cid, resolved := r[bucket]; resolved {
return cid, nil
}
return cid.ID{}, fmt.Errorf("bucket %s is not mapped to any container", bucket)
}

@ -1,211 +0,0 @@
package s3local
import (
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
)
// treeServiceEngineWrapper implements the basic functioning of tree service using
// only the local storage engine instance. The node position and count is fixed
// beforehand in order to coordinate multiple runs on different nodes of the same
// cluster.
//
// The implementation mostly emulates the following
//
// - https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/services/tree/service.go
// - https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/src/branch/master/internal/frostfs/services/tree_client_grpc.go
//
// but skips details which are irrelevant for local storage engine-backed clients.
type treeServiceEngineWrapper struct {
ng *engine.StorageEngine
pos int
size int
}
type kv struct {
k string
v []byte
}
func (kv kv) GetKey() string { return kv.k }
func (kv kv) GetValue() []byte { return kv.v }
type nodeResponse struct {
meta []tree.Meta
nodeID uint64
parentID uint64
ts uint64
}
func (r nodeResponse) GetMeta() []tree.Meta { return r.meta }
func (r nodeResponse) GetNodeID() uint64 { return r.nodeID }
func (r nodeResponse) GetParentID() uint64 { return r.parentID }
func (r nodeResponse) GetTimestamp() uint64 { return r.ts }
func (s treeServiceEngineWrapper) GetNodes(ctx context.Context, p *tree.GetNodesParams) ([]tree.NodeResponse, error) {
nodeIDs, err := s.ng.TreeGetByPath(p.BktInfo.CID, p.TreeID, pilorama.AttributeFilename, p.Path, p.LatestOnly)
if err != nil {
if errors.Is(err, pilorama.ErrTreeNotFound) {
// This is needed in order for the tree implementation to create the tree/node
// if it doesn't exist already.
// See: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/src/branch/master/internal/frostfs/services/tree_client_grpc.go#L306
return nil, tree.ErrNodeNotFound
}
return nil, err
}
resps := make([]tree.NodeResponse, 0, len(nodeIDs))
for _, nodeID := range nodeIDs {
m, parentID, err := s.ng.TreeGetMeta(p.BktInfo.CID, p.TreeID, nodeID)
if err != nil {
return nil, err
}
resp := nodeResponse{
parentID: parentID,
nodeID: nodeID,
ts: m.Time,
}
if p.AllAttrs {
resp.meta = kvToTreeMeta(m.Items)
} else {
for _, it := range m.Items {
for _, attr := range p.Meta {
if it.Key == attr {
resp.meta = append(resp.meta, kv{it.Key, it.Value})
}
break
}
}
}
resps = append(resps, resp)
}
return resps, nil
}
func (s treeServiceEngineWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) ([]tree.NodeResponse, error) {
var resps []tree.NodeResponse
var traverse func(nodeID uint64, curDepth uint32) error
traverse = func(nodeID uint64, curDepth uint32) error {
m, parentID, err := s.ng.TreeGetMeta(bktInfo.CID, treeID, nodeID)
if err != nil {
return fmt.Errorf("getting meta: %v", err)
}
resps = append(resps, nodeResponse{
nodeID: nodeID,
parentID: parentID,
ts: m.Time,
meta: kvToTreeMeta(m.Items),
})
if curDepth >= depth {
return nil
}
children, err := s.ng.TreeGetChildren(bktInfo.CID, treeID, nodeID)
if err != nil {
return fmt.Errorf("getting children: %v", err)
}
for _, child := range children {
if err := traverse(child, curDepth+1); err != nil {
return err
}
}
return nil
}
if err := traverse(rootID, 0); err != nil {
return nil, fmt.Errorf("traversing: %v", err)
}
return resps, nil
}
func (s treeServiceEngineWrapper) AddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, parentID uint64, meta map[string]string) (uint64, error) {
desc := pilorama.CIDDescriptor{
CID: bktInfo.CID,
Position: s.pos,
Size: s.size,
}
mv, err := s.ng.TreeMove(desc, treeID, &pilorama.Move{
Parent: parentID,
Child: pilorama.RootID,
Meta: pilorama.Meta{Items: mapToKV(meta)},
})
return mv.Child, err
}
func (s treeServiceEngineWrapper) AddNodeByPath(ctx context.Context, bktInfo *data.BucketInfo, treeID string, path []string, meta map[string]string) (uint64, error) {
desc := pilorama.CIDDescriptor{
CID: bktInfo.CID,
Position: s.pos,
Size: s.size,
}
mvs, err := s.ng.TreeAddByPath(desc, treeID, pilorama.AttributeFilename, path, mapToKV(meta))
if err != nil {
return pilorama.TrashID, err
}
return mvs[len(mvs)-1].Child, nil
}
func (s treeServiceEngineWrapper) MoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID, parentID uint64, meta map[string]string) error {
if nodeID == pilorama.RootID {
return fmt.Errorf("node with ID %d is the root and can't be moved", nodeID)
}
desc := pilorama.CIDDescriptor{
CID: bktInfo.CID,
Position: s.pos,
Size: s.size,
}
_, err := s.ng.TreeMove(desc, treeID, &pilorama.Move{
Parent: parentID,
Child: nodeID,
Meta: pilorama.Meta{
Items: mapToKV(meta),
},
})
return err
}
func (s treeServiceEngineWrapper) RemoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID uint64) error {
if nodeID == pilorama.RootID {
return fmt.Errorf("node with ID %d is the root and can't be removed", nodeID)
}
desc := pilorama.CIDDescriptor{
CID: bktInfo.CID,
Position: s.pos,
Size: s.size,
}
_, err := s.ng.TreeMove(desc, treeID, &pilorama.Move{
Parent: pilorama.TrashID,
Child: nodeID,
})
return err
}
func mapToKV(m map[string]string) []pilorama.KeyValue {
var kvs []pilorama.KeyValue
for k, v := range m {
kvs = append(kvs, pilorama.KeyValue{
Key: k,
Value: []byte(v),
})
}
return kvs
}
func kvToTreeMeta(x []pilorama.KeyValue) []tree.Meta {
ret := make([]tree.Meta, 0, len(x))
for _, x := range x {
ret = append(ret, kv{x.Key, x.Value})
}
return ret
}

@ -1,24 +0,0 @@
# This configuration can be used for the local scenario when testing locally.
storage:
shard_num: 1
shard:
0:
metabase:
path: /tmp/k6_local/metabase
perm: 0600
blobstor:
- path: /tmp/k6_local/blobovnicza
type: blobovnicza
perm: 0600
opened_cache_capacity: 32
depth: 1
width: 1
- path: /tmp/k6_local/fstree
type: fstree
perm: 0600
depth: 4
writecache:
enabled: false
gc:
remover_batch_size: 100
remover_sleep_interval: 1m

@ -5,10 +5,6 @@ import registry from 'k6/x/frostfs/registry';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
@ -24,7 +20,7 @@ const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random gRPC endpoint for current VU
const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpc_endpoint = grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
const grpc_client = native.connect(grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5, __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60);
const grpc_client = native.connect(grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5, __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 15);
const log = logging.new().withField("endpoint", grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
@ -47,7 +43,7 @@ if (registry_enabled && delete_age) {
}
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE), __ENV.PAYLOAD_TYPE || "");
const scenarios = {};
@ -171,3 +167,10 @@ export function obj_delete() {
obj_registry.deleteObject(obj.id);
}
export function uuidv4() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
let r = Math.random() * 16 | 0, v = c === 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
}

@ -1,198 +0,0 @@
import datagen from 'k6/x/frostfs/datagen';
import native from 'k6/x/frostfs/native';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random gRPC endpoint for current VU
const grpc_endpoints = __ENV.GRPC_ENDPOINTS.split(',');
const grpc_endpoint = grpc_endpoints[Math.floor(Math.random() * grpc_endpoints.length)];
const grpc_client = native.connect(grpc_endpoint, '', __ENV.DIAL_TIMEOUT ? parseInt(__ENV.DIAL_TIMEOUT) : 5, __ENV.STREAM_TIMEOUT ? parseInt(__ENV.STREAM_TIMEOUT) : 60);
const log = logging.new().withField("endpoint", grpc_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
}
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));
const scenarios = {};
const time_unit = __ENV.TIME_UNIT || '1s';
const pre_alloc_write_vus = parseInt(__ENV.PRE_ALLOC_WRITERS || '0');
const max_write_vus = parseInt(__ENV.MAX_WRITERS || pre_alloc_write_vus);
const write_rate = parseInt(__ENV.WRITE_RATE || '0');
if (write_rate > 0) {
scenarios.write = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_write_vus,
rate: write_rate,
timeUnit: time_unit,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const pre_alloc_read_vus = parseInt(__ENV.PRE_ALLOC_READERS || '0');
const max_read_vus = parseInt(__ENV.MAX_READERS || pre_alloc_read_vus);
const read_rate = parseInt(__ENV.READ_RATE || '0');
if (read_rate > 0) {
scenarios.read = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_read_vus,
rate: read_rate,
timeUnit: time_unit,
exec: 'obj_read',
gracefulStop: '5s',
};
}
const pre_alloc_delete_vus = parseInt(__ENV.PRE_ALLOC_DELETERS || '0');
const max_delete_vus = parseInt(__ENV.MAX_DELETERS || pre_alloc_write_vus);
const delete_rate = parseInt(__ENV.DELETE_RATE || '0');
if (delete_rate > 0) {
if (!obj_to_delete_selector) {
throw new Error('Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_delete_vus,
maxVUs: max_delete_vus,
rate: delete_rate,
timeUnit: time_unit,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_pre_allocated_vu_count = pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Pre allocated reading VUs: ${pre_alloc_read_vus}`);
console.log(`Pre allocated writing VUs: ${pre_alloc_write_vus}`);
console.log(`Pre allocated deleting VUs: ${pre_alloc_delete_vus}`);
console.log(`Total pre allocated VUs: ${total_pre_allocated_vu_count}`);
console.log(`Max reading VUs: ${max_read_vus}`);
console.log(`Max writing VUs: ${max_write_vus}`);
console.log(`Max deleting VUs: ${max_delete_vus}`);
console.log(`Total max VUs: ${total_max_vu_count}`);
console.log(`Time unit: ${time_unit}`);
console.log(`Read rate: ${read_rate}`);
console.log(`Writing rate: ${write_rate}`);
console.log(`Delete rate: ${delete_rate}`);
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const headers = {
unique_header: uuidv4()
};
const container = container_list[Math.floor(Math.random() * container_list.length)];
const { payload, hash } = generator.genPayload(registry_enabled);
const resp = grpc_client.put(container, headers, payload);
if (!resp.success) {
log.withField("cid", container).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject(container, resp.object_id, "", "", hash);
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = grpc_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const resp = grpc_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}

View file

@ -1,14 +1,9 @@
import datagen from 'k6/x/frostfs/datagen';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import http from 'k6/http';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
@ -31,7 +26,7 @@ const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : und
const duration = __ENV.DURATION;
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE), __ENV.PAYLOAD_TYPE || "");
const scenarios = {};
@ -121,3 +116,10 @@ export function obj_read() {
log.withFields({status: resp.status, cid: obj.container, oid: obj.object}).error(resp.error);
}
}
export function uuidv4() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
let r = Math.random() * 16 | 0, v = c === 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
}

View file

@ -1,10 +0,0 @@
import env from 'k6/x/frostfs/env';
export function parseEnv() {
if (__ENV.ENV_FILE) {
const parsedVars = env.parse(__ENV.ENV_FILE)
for (const prop in parsedVars) {
__ENV[prop] = __ENV[prop] || parsedVars[prop];
}
}
}

View file

@ -1,2 +0,0 @@
(()=>{"use strict";var t={n:r=>{var e=r&&r.__esModule?()=>r.default:()=>r;return t.d(e,{a:e}),e},d:(r,e)=>{for(var n in e)t.o(e,n)&&!t.o(r,n)&&Object.defineProperty(r,n,{enumerable:!0,get:e[n]})},o:(t,r)=>Object.prototype.hasOwnProperty.call(t,r),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},r={};t.r(r),t.d(r,{findBetween:()=>x,getCurrentStageIndex:()=>i,normalDistributionStages:()=>m,parseDuration:()=>o,randomIntBetween:()=>d,randomItem:()=>h,randomString:()=>p,tagWithCurrentStageIndex:()=>u,tagWithCurrentStageProfile:()=>s,uuidv4:()=>g});const e=require("k6/execution");var n=t.n(e);function o(t){if(null==t||t.length<1)throw new Error("str is empty");for(var r=0,e="",n={},o=0;o<t.length;o++)if((a(t[o])||"."==t[o])&&(e+=t[o]),null!=t[o+1]&&!a(t[o+1])&&"."!=t[o+1]){var i=parseFloat(e,10),u=t[o+1];switch(u){case"d":r+=24*i*60*60*1e3;break;case"h":r+=60*i*60*1e3;break;case"m":o+2<t.length&&"s"==t[o+2]?(r+=Math.trunc(i),o++,u="ms"):r+=60*i*1e3;break;case"s":r+=1e3*i;break;default:throw new Error("".concat(u," is an unsupported time unit"))}if(n[u])throw new Error("".concat(u," time unit is provided multiple times"));n[u]=!0,o++,e=""}return e.length>0&&(r+=parseFloat(e,10)),r}function a(t){return t>="0"&&t<="9"}function i(){if(null==n()||null==n().test||null==n().test.options)throw new Error("k6/execution.test.options is undefined - getCurrentStageIndex requires a k6 v0.38.0 or later. Please, upgrade for getting k6/execution.test.options supported.");var t=n().test.options.scenarios[n().scenario.name];if(null==t)throw new Error("the exec.test.options object doesn't contain the current scenario ".concat(n().scenario.name));if(null==t.stages)throw new Error("only ramping-vus or ramping-arravial-rate supports stages, it is not possible to get a stage index on other executors.");if(t.stages.length<1)throw new Error("the current scenario ".concat(t.name," doesn't contain any stage"));for(var r=0,e=new Date-n().scenario.startTime,a=0;a<t.stages.length;a++)if(e<(r+=o(t.stages[a].duration)))return a;return t.stages.length-1}function u(){n().vu.tags.stage=i()}function s(){n().vu.tags.stage_profile=function(){var t=i();if(t<1)return"ramp-up";var r=n().test.options.scenarios[n().scenario.name].stages,e=r[t],o=r[t-1];return e.target>o.target?"ramp-up":o.target==e.target?"steady":"ramp-down"}()}const l=require("k6/crypto");function c(t){return function(t){if(Array.isArray(t))return f(t)}(t)||function(t){if("undefined"!=typeof Symbol&&null!=t[Symbol.iterator]||null!=t["@@iterator"])return Array.from(t)}(t)||function(t,r){if(!t)return;if("string"==typeof t)return f(t,r);var e=Object.prototype.toString.call(t).slice(8,-1);"Object"===e&&t.constructor&&(e=t.constructor.name);if("Map"===e||"Set"===e)return Array.from(t);if("Arguments"===e||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(e))return f(t,r)}(t)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function f(t,r){(null==r||r>t.length)&&(r=t.length);for(var e=0,n=new Array(r);e<r;e++)n[e]=t[e];return n}function g(){var t=arguments.length>0&&void 0!==arguments[0]&&arguments[0];return t?y():v()}function d(t,r){return Math.floor(Math.random()*(r-t+1)+t)}function h(t){return t[Math.floor(Math.random()*t.length)]}function p(t){for(var r=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:"abcdefghijklmnopqrstuvwxyz",e="";t--;)e+=r[Math.random()*r.length|0];return e}function x(t,r,e){for(var n,o=arguments.length>3&&void 0!==arguments[3]&&arguments[3],a=[],i=!0,u=0;i&&-1!=(n=t.indexOf(r))&&(n+=r.length,-1!=(u=t.indexOf(e,n)));){var s=t.substring(n,u);if(!o)return s;a.push(s),t=t.substring(u+e.length)}return a.length?a:null}function m(t,r){var e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:10;function n(t,r,e){return Math.exp(-.5*Math.pow((e-t)/r,2))/(r*Math.sqrt(2*Math.PI))}for(var o=0,a=1,i=new Array(e+2).fill(0),u=new Array(e+2).fill(Math.ceil(r/6)),s=[],l=0;l<=e;l++)i[l]=n(o,a,-2*a+4*a*l/e);for(var f=Math.max.apply(Math,c(i)),g=i.map((function(r){return Math.round(r*t/f)})),d=1;d<=e;d++)u[d]=Math.ceil(4*r/(6*e));for(var h=0;h<=e+1;h++)s.push({duration:"".concat(u[h],"s"),target:g[h]});return s}function v(){return"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g,(function(t){var r=16*Math.random()|0;return("x"===t?r:3&r|8).toString(16)}))}function y(){for(var t=[],r=0;r<256;++r)t.push((r+256).toString(16).slice(1));var e=new Uint8Array((0,l.randomBytes)(16));return e[6]=15&e[6]|64,e[8]=63&e[8]|128,(t[e[0]]+t[e[1]]+t[e[2]]+t[e[3]]+"-"+t[e[4]]+t[e[5]]+"-"+t[e[6]]+t[e[7]]+"-"+t[e[8]]+t[e[9]]+"-"+t[e[10]]+t[e[11]]+t[e[12]]+t[e[13]]+t[e[14]]+t[e[15]]).toLowerCase()}var w=exports;for(var b in r)w[b]=r[b];r.__esModule&&Object.defineProperty(w,"__esModule",{value:!0})})();
//# sourceMappingURL=index.js.map

View file

@ -1,158 +0,0 @@
import datagen from 'k6/x/frostfs/datagen';
import local from 'k6/x/frostfs/local';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import { SharedArray } from 'k6/data';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
const config_file = __ENV.CONFIG_FILE;
const debug_logger = (__ENV.DEBUG_LOGGER || 'false') == 'true';
const local_client = local.connect(config_file, '', debug_logger);
const log = logging.new().withField("config", config_file);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
}
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));
const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
if (write_vu_count > 0) {
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
};
}
const delete_vu_count = parseInt(__ENV.DELETERS || '0');
if (delete_vu_count > 0) {
if (!obj_to_delete_selector) {
throw new Error('Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
executor: 'constant-vus',
vus: delete_vu_count,
duration: `${duration}s`,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count + delete_vu_count;
console.log(`Pregenerated containers: ${container_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Deleting VUs: ${delete_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
const headers = {
unique_header: uuidv4()
};
const container = container_list[Math.floor(Math.random() * container_list.length)];
const { payload, hash } = generator.genPayload(registry_enabled);
const resp = local_client.put(container, headers, payload);
if (!resp.success) {
log.withField("cid", container).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject(container, resp.object_id, "", "", hash);
}
}
export function obj_read() {
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = local_client.get(obj.container, obj.object)
if (!resp.success) {
log.withFields({cid: obj.container, oid: obj.object}).error(resp.error);
}
}
export function obj_delete() {
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const resp = local_client.delete(obj.c_id, obj.o_id);
if (!resp.success) {
// Log errors except (2052 - object already deleted)
log.withFields({cid: obj.c_id, oid: obj.o_id}).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}

View file

@ -11,9 +11,9 @@ def create_bucket(endpoint, versioning, location):
bucket_name = str(uuid.uuid4())
cmd_line = f"aws --no-verify-ssl s3api create-bucket --bucket {bucket_name} " \
f"--endpoint https://{endpoint} {location}"
f"--endpoint http://{endpoint} {location}"
cmd_line_ver = f"aws --no-verify-ssl s3api put-bucket-versioning --bucket {bucket_name} " \
f"--versioning-configuration Status=Enabled --endpoint https://{endpoint} "
f"--versioning-configuration Status=Enabled --endpoint http://{endpoint} "
out, success = execute_cmd(cmd_line)
@ -21,7 +21,7 @@ def create_bucket(endpoint, versioning, location):
if "succeeded and you already own it" in out:
bucket_create_marker = True
else:
print(f" > Bucket {bucket_name} has not been created:\n{out}")
print(f" > Bucket {bucket_name} has not been created.")
else:
bucket_create_marker = True
print(f"cmd: {cmd_line}")
@ -29,7 +29,7 @@ def create_bucket(endpoint, versioning, location):
if bucket_create_marker and versioning == "True":
out, success = execute_cmd(cmd_line_ver)
if not success:
print(f" > Bucket versioning has not been applied for bucket {bucket_name}:\n{out}")
print(f" > Bucket versioning has not been applied for bucket {bucket_name}.")
else:
print(f" > Bucket versioning has been applied.")
@ -39,8 +39,8 @@ def create_bucket(endpoint, versioning, location):
def upload_object(bucket, payload_filepath, endpoint):
object_name = str(uuid.uuid4())
cmd_line = f"aws --no-verify-ssl s3api put-object --bucket {bucket} --key {object_name} " \
f"--body {payload_filepath} --endpoint https://{endpoint}"
cmd_line = f"aws s3api put-object --bucket {bucket} --key {object_name} " \
f"--body {payload_filepath} --endpoint http://{endpoint}"
out, success = execute_cmd(cmd_line)
if not success:

View file

@ -1,29 +0,0 @@
#!/usr/bin/python3
import argparse
import json
import requests
parser = argparse.ArgumentParser()
parser.add_argument('--endpoint', help='Endpoint of the S3 gateway')
parser.add_argument('--preset_file', help='JSON file path with s3 preset')
args = parser.parse_args()
def main():
with open(args.preset_file) as f:
preset_text = f.read()
preset = json.loads(preset_text)
containers = []
for bucket in preset.get('buckets'):
resp = requests.head(f'{args.endpoint}/{bucket}', verify=False)
containers.append(resp.headers['X-Container-Id'])
preset['containers'] = containers
with open(args.preset_file, 'w+') as f:
json.dump(preset, f, ensure_ascii=False, indent=2)
if __name__ == "__main__":
main()

View file

@ -1,14 +1,10 @@
---
# How to execute scenarios
**Note:** you can provide a file with all environment variables (system environment variables override values from the file) using
`-e ENV_FILE=.env` (the relative path to that file is resolved from the working directory):
```shell
$ ./k6 run -e ENV_FILE=.env some-scenario.js
```
## Common options for gRPC, HTTP, S3 scenarios:
## Common options for all scenarios:
Scenarios `grpc.js`, `local.js`, `http.js` and `s3.js` support the following options:
Scenarios `grpc.js`, `http.js` and `s3.js` support the following options:
* `DURATION` - duration of scenario in seconds.
* `READERS` - number of VUs performing read operations.
* `WRITERS` - number of VUs performing write operations.
@ -18,10 +14,7 @@ Scenarios `grpc.js`, `local.js`, `http.js` and `s3.js` support the following opt
* `SLEEP_WRITE` - time interval (in seconds) between writing VU iterations.
* `SLEEP_READ` - time interval (in seconds) between reading VU iterations.
* `SELECTION_SIZE` - size of batch to select for deletion (default: 1000).
## Common options for the local scenarios:
* `DEBUG_LOGGER` - uses a development logger for the local storage engine to aid debugging (default: false).
* `PAYLOAD_TYPE` - type of an object payload ("random" or "text", default: "random").
Examples of how to use these options are provided below for each scenario.
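For instance, a gRPC run that writes text payloads (the new `PAYLOAD_TYPE` option) could be launched as sketched below; the endpoint, VU counts and sizes are illustrative placeholders, not taken from the original README:
```shell
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e PAYLOAD_TYPE=text -e READERS=20 -e WRITERS=20 -e REGISTRY_FILE=registry.bolt -e GRPC_ENDPOINTS=host1:8080 -e PREGEN_JSON=./grpc.json scenarios/grpc.js
```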
@ -50,26 +43,6 @@ Options (in addition to the common options):
* `DIAL_TIMEOUT` - timeout to connect to a node (in seconds).
* `STREAM_TIMEOUT` - timeout for a single stream message for `PUT`/`GET` operations (in seconds).
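If these are not set, the scenario falls back to its built-in defaults (see the `native.connect` call in `grpc.js`). A hypothetical invocation overriding both timeouts might look like this (endpoint and sizes are placeholders):
```shell
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e WRITERS=10 -e DIAL_TIMEOUT=10 -e STREAM_TIMEOUT=30 -e GRPC_ENDPOINTS=host1:8080 -e PREGEN_JSON=./grpc.json scenarios/grpc.js
```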
## Local
1. Create pre-generated containers or objects:
The tests will use all pre-created containers for PUT operations and all pre-created objects for READ operations. There is no dedicated preset script for the local scenario, so we use the same script as for gRPC:
```shell
$ ./scenarios/preset/preset_grpc.py --size 1024 --containers 1 --out grpc.json --endpoint host1:8080 --preload_obj 500
```
2. Execute scenario with options:
```shell
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e DELETERS=30 -e DELETE_AGE=10 -e REGISTRY_FILE=registry.bolt -e CONFIG_FILE=/path/to/config.yaml -e PREGEN_JSON=./grpc.json scenarios/local.js
```
Options (in addition to the common options):
* `CONFIG_FILE` - path to the local configuration file used for the storage node. Only the storage configuration section is used.
* `DELETERS` - number of VUs performing delete operations (using deleters requires that options `DELETE_AGE` and `REGISTRY_FILE` are specified as well).
* `DELETE_AGE` - age of an object (in seconds) before which it cannot be deleted. This parameter can be used to control how many objects remain in the system under load.
## HTTP
1. Create pre-generated containers or objects:
@ -128,29 +101,6 @@ Options (in addition to the common options):
* `SLEEP_DELETE` - time interval (in seconds) between deleting VU iterations.
* `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
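As a hedged example, a write-only S3 run that reuses a fixed key for every PUT could look like this (the endpoint, sizes and key name below are placeholders):
```shell
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e WRITERS=10 -e OBJ_NAME=fixed-object -e S3_ENDPOINTS=s3host:8080 -e PREGEN_JSON=s3.json scenarios/s3.js
```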
## S3 Local
1. Follow steps 1 and 2 of the regular S3 scenario to obtain credentials and a preset file with the information about the pre-created buckets and objects.
2. Assuming the preset file was named `pregen.json`, we need to populate the bucket-to-container mapping before running the local S3 scenario:
**WARNING**: Be aware that this command will overwrite the `containers` list field in the `pregen.json` file. Make a backup beforehand if needed.
```shell
$ ./scenarios/preset/resolve_containers_in_preset.py --endpoint s3host:8080 --preset_file pregen.json
```
After this, the `pregen.json` file will contain a `containers` list field of the same length as `buckets`, mapping each bucket name to its container ID in the order they appear.
3. Execute the scenario with the desired options. For example:
```shell
$ ./k6 run -e DURATION=60 -e WRITE_OBJ_SIZE=8192 -e READERS=20 -e WRITERS=20 -e CONFIG_FILE=/path/to/node/config.yml -e PREGEN_JSON=pregen.json scenarios/s3local.js
```
Note that the `s3local` scenario currently does not support deleters.
Options (in addition to the common options):
* `OBJ_NAME` - if specified, this name will be used for all write operations instead of random generation.
## Verify
This scenario verifies that objects created by a previous run are actually stored in the system and that their data is not corrupted. Running it assumes that you have already run a gRPC, HTTP, or S3 scenario with the `REGISTRY_FILE` option.
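A minimal, hypothetical invocation is sketched below; it assumes the verification script is `scenarios/verify.js`, and additional options may be required, so consult the script for the full set:
```shell
$ ./k6 run -e REGISTRY_FILE=registry.bolt -e GRPC_ENDPOINTS=host1:8080 scenarios/verify.js
```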

View file

@ -1,14 +1,9 @@
import datagen from 'k6/x/frostfs/datagen';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
@ -46,7 +41,7 @@ if (registry_enabled && delete_age) {
);
}
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE), __ENV.PAYLOAD_TYPE || "");
const scenarios = {};
@ -168,3 +163,10 @@ export function obj_delete() {
obj_registry.deleteObject(obj.id);
}
export function uuidv4() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
let r = Math.random() * 16 | 0, v = c === 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
}

View file

@ -1,197 +0,0 @@
import datagen from 'k6/x/frostfs/datagen';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3 from 'k6/x/frostfs/s3';
import { SharedArray } from 'k6/data';
import { sleep } from 'k6';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const bucket_list = new SharedArray('bucket_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
});
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
// Select random S3 endpoint for current VU
const s3_endpoints = __ENV.S3_ENDPOINTS.split(',');
const s3_endpoint = s3_endpoints[Math.floor(Math.random() * s3_endpoints.length)];
const s3_client = s3.connect(`http://${s3_endpoint}`);
const log = logging.new().withField("endpoint", s3_endpoint);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
const delete_age = __ENV.DELETE_AGE ? parseInt(__ENV.DELETE_AGE) : undefined;
let obj_to_delete_selector = undefined;
if (registry_enabled && delete_age) {
obj_to_delete_selector = registry.getSelector(
__ENV.REGISTRY_FILE,
"obj_to_delete",
__ENV.SELECTION_SIZE ? parseInt(__ENV.SELECTION_SIZE) : 0,
{
status: "created",
age: delete_age,
}
);
}
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));
const scenarios = {};
const time_unit = __ENV.TIME_UNIT || '1s';
const pre_alloc_write_vus = parseInt(__ENV.PRE_ALLOC_WRITERS || '0');
const max_write_vus = parseInt(__ENV.MAX_WRITERS || pre_alloc_write_vus);
const write_rate = parseInt(__ENV.WRITE_RATE || '0');
if (write_rate > 0) {
scenarios.write = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_write_vus,
rate: write_rate,
timeUnit: time_unit,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const pre_alloc_read_vus = parseInt(__ENV.PRE_ALLOC_READERS || '0');
const max_read_vus = parseInt(__ENV.MAX_READERS || pre_alloc_read_vus);
const read_rate = parseInt(__ENV.READ_RATE || '0');
if (read_rate > 0) {
scenarios.read = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_write_vus,
maxVUs: max_read_vus,
rate: read_rate,
timeUnit: time_unit,
exec: 'obj_read',
gracefulStop: '5s',
};
}
const pre_alloc_delete_vus = parseInt(__ENV.PRE_ALLOC_DELETERS || '0');
const max_delete_vus = parseInt(__ENV.MAX_DELETERS || pre_alloc_write_vus);
const delete_rate = parseInt(__ENV.DELETE_RATE || '0');
if (delete_rate > 0) {
if (!obj_to_delete_selector) {
throw new Error('Positive DELETE worker number without a proper object selector');
}
scenarios.delete = {
executor: 'constant-arrival-rate',
duration: `${duration}s`,
preAllocatedVUs: pre_alloc_delete_vus,
maxVUs: max_delete_vus,
rate: delete_rate,
timeUnit: time_unit,
exec: 'obj_delete',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_pre_allocated_vu_count = pre_alloc_write_vus + pre_alloc_read_vus + pre_alloc_delete_vus;
const total_max_vu_count = max_read_vus + max_write_vus + max_delete_vus
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Pre allocated reading VUs: ${pre_alloc_read_vus}`);
console.log(`Pre allocated writing VUs: ${pre_alloc_write_vus}`);
console.log(`Pre allocated deleting VUs: ${pre_alloc_delete_vus}`);
console.log(`Total pre allocated VUs: ${total_pre_allocated_vu_count}`);
console.log(`Max reading VUs: ${max_read_vus}`);
console.log(`Max writing VUs: ${max_write_vus}`);
console.log(`Max deleting VUs: ${max_delete_vus}`);
console.log(`Total max VUs: ${total_max_vu_count}`);
console.log(`Time unit: ${time_unit}`);
console.log(`Read rate: ${read_rate}`);
console.log(`Writing rate: ${write_rate}`);
console.log(`Delete rate: ${delete_rate}`);
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
if (__ENV.SLEEP_WRITE) {
sleep(__ENV.SLEEP_WRITE);
}
const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const { payload, hash } = generator.genPayload(registry_enabled);
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject("", "", bucket, key, hash);
}
}
export function obj_read() {
if (__ENV.SLEEP_READ) {
sleep(__ENV.SLEEP_READ);
}
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket: obj.bucket, key: obj.object}).error(resp.error);
}
}
export function obj_delete() {
if (__ENV.SLEEP_DELETE) {
sleep(__ENV.SLEEP_DELETE);
}
const obj = obj_to_delete_selector.nextObject();
if (!obj) {
return;
}
const resp = s3_client.delete(obj.s3_bucket, obj.s3_key);
if (!resp.success) {
log.withFields({bucket: obj.s3_bucket, key: obj.s3_key, op: "DELETE"}).error(resp.error);
return;
}
obj_registry.deleteObject(obj.id);
}

View file

@ -1,127 +0,0 @@
import datagen from 'k6/x/frostfs/datagen';
import logging from 'k6/x/frostfs/logging';
import registry from 'k6/x/frostfs/registry';
import s3local from 'k6/x/frostfs/s3local';
import { SharedArray } from 'k6/data';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
import { uuidv4 } from './libs/k6-utils-1.4.0.js';
parseEnv();
const obj_list = new SharedArray('obj_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).objects;
});
const container_list = new SharedArray('container_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).containers;
});
const bucket_list = new SharedArray('bucket_list', function () {
return JSON.parse(open(__ENV.PREGEN_JSON)).buckets;
});
function bucket_mapping() {
if (container_list.length != bucket_list.length) {
throw 'The number of containers and buckets in the preset file must be the same.';
}
let mapping = {};
for (let i = 0; i < container_list.length; ++i) {
mapping[bucket_list[i]] = container_list[i];
}
return mapping;
}
const read_size = JSON.parse(open(__ENV.PREGEN_JSON)).obj_size;
const summary_json = __ENV.SUMMARY_JSON || "/tmp/summary.json";
const config_file = __ENV.CONFIG_FILE;
const s3_client = s3local.connect(config_file, {
'debug_logger': __ENV.DEBUG_LOGGER || 'false',
}, bucket_mapping());
const log = logging.new().withField("config", config_file);
const registry_enabled = !!__ENV.REGISTRY_FILE;
const obj_registry = registry_enabled ? registry.open(__ENV.REGISTRY_FILE) : undefined;
const duration = __ENV.DURATION;
const generator = datagen.generator(1024 * parseInt(__ENV.WRITE_OBJ_SIZE));
const scenarios = {};
const write_vu_count = parseInt(__ENV.WRITERS || '0');
if (write_vu_count > 0) {
scenarios.write = {
executor: 'constant-vus',
vus: write_vu_count,
duration: `${duration}s`,
exec: 'obj_write',
gracefulStop: '5s',
};
}
const read_vu_count = parseInt(__ENV.READERS || '0');
if (read_vu_count > 0) {
scenarios.read = {
executor: 'constant-vus',
vus: read_vu_count,
duration: `${duration}s`,
exec: 'obj_read',
gracefulStop: '5s',
};
}
export const options = {
scenarios,
setupTimeout: '5s',
};
export function setup() {
const total_vu_count = write_vu_count + read_vu_count;
console.log(`Pregenerated buckets: ${bucket_list.length}`);
console.log(`Pregenerated read object size: ${read_size}`);
console.log(`Pregenerated total objects: ${obj_list.length}`);
console.log(`Reading VUs: ${read_vu_count}`);
console.log(`Writing VUs: ${write_vu_count}`);
console.log(`Total VUs: ${total_vu_count}`);
}
export function teardown(data) {
if (obj_registry) {
obj_registry.close();
}
}
export function handleSummary(data) {
return {
'stdout': textSummary(data, { indent: ' ', enableColors: false }),
[summary_json]: JSON.stringify(data),
};
}
export function obj_write() {
const key = __ENV.OBJ_NAME || uuidv4();
const bucket = bucket_list[Math.floor(Math.random() * bucket_list.length)];
const { payload, hash } = generator.genPayload(registry_enabled);
const resp = s3_client.put(bucket, key, payload);
if (!resp.success) {
log.withFields({bucket: bucket, key: key}).error(resp.error);
return;
}
if (obj_registry) {
obj_registry.addObject("", "", bucket, key, hash);
}
}
export function obj_read() {
const obj = obj_list[Math.floor(Math.random() * obj_list.length)];
const resp = s3_client.get(obj.bucket, obj.object);
if (!resp.success) {
log.withFields({bucket: obj.bucket, key: obj.object}).error(resp.error);
}
}

View file

@ -4,9 +4,6 @@ import s3 from 'k6/x/frostfs/s3';
import { sleep } from 'k6';
import { Counter } from 'k6/metrics';
import { textSummary } from './libs/k6-summary-0.0.2.js';
import { parseEnv } from './libs/env-parser.js';
parseEnv();
const obj_registry = registry.open(__ENV.REGISTRY_FILE);