diff --git a/internal/local/local.go b/internal/local/local.go
index 7f34100..aab5b45 100644
--- a/internal/local/local.go
+++ b/internal/local/local.go
@@ -31,6 +31,7 @@ import (
 	"go.k6.io/k6/js/modules"
 	"go.k6.io/k6/metrics"
 	"go.uber.org/zap"
+	"golang.org/x/sys/unix"
 )
 
 // RootModule is the global module object type. It is instantiated once per test
@@ -77,6 +78,28 @@ func NewLocalModuleInstance(vu modules.VU, resolveEngine func(context.Context, s
 	}
 }
 
+// checkResourceLimits checks the current limit on NOFILE.
+//
+// The usual default is around 1024 and this is too low for production clusters where a value of
+// about 65536 is needed in order to not run into errors because of attempting to open too many files.
+// This is needed for the local storage engine scenarios, where the user running the scenario is not
+// necessarily the service user, for which the limits are preconfigured correctly.
+//
+// See: https://k6.io/docs/misc/fine-tuning-os/
+func checkResourceLimits() error {
+	const (
+		minNofileLimit = 1 << 16
+	)
+	rlimit := &unix.Rlimit{}
+	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, rlimit); err != nil {
+		return fmt.Errorf("getting resource limits: %w", err)
+	}
+	if rlimit.Cur < minNofileLimit {
+		return fmt.Errorf("nofile limit %d is lower than required minimum %d", rlimit.Cur, minNofileLimit)
+	}
+	return nil
+}
+
 // GetOrCreateEngine returns the current engine instance for the given configuration file,
 // creating a new one if none exists. Note that the identity of configuration files is their
 // file name for the purposes of test runs.
@@ -96,6 +119,9 @@ func (r *RootModule) GetOrCreateEngine(ctx context.Context, configFile string, d
 	if err != nil {
 		return nil, fmt.Errorf("creating engine options from config: %v", err)
 	}
+	if err := checkResourceLimits(); err != nil {
+		return nil, err
+	}
 	r.ng = engine.New(ngOpts...)
 	for i, opts := range shardOpts {
 		if _, err := r.ng.AddShard(opts...); err != nil {