forked from TrueCloudLab/frostfs-s3-gw
[#541] Use default value if config param is unset after SIGHUP
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
parent 368c7d2acd
commit d99c1cf9d3
3 changed files with 170 additions and 117 deletions
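In short, the App struct stops holding a bare *viper.Viper and instead holds an appCfg wrapper: call sites read configuration through a.cfg.config(), which returns the current instance under a read lock, and the SIGHUP path calls a.cfg.reload(), which builds a fresh viper instance with newViper (flag-driven defaults included) and swaps it in under the write lock. Because the instance is rebuilt rather than re-read in place, a parameter that is no longer set after the reload falls back to its default value. The sketch below is a minimal, self-contained illustration of that pattern, not the gateway's actual code; the listen_address key and all identifiers are made up for the example.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"

	"github.com/spf13/viper"
)

// cfgHolder plays the role of the appCfg wrapper in this sketch:
// one mutex-guarded *viper.Viper that can be swapped wholesale.
type cfgHolder struct {
	mu       sync.RWMutex
	path     string
	settings *viper.Viper
}

// newViper builds a fresh instance: defaults first, then the config file on top.
func newViper(path string) (*viper.Viper, error) {
	v := viper.New()
	v.SetDefault("listen_address", "0.0.0.0:8080") // hypothetical parameter
	v.SetConfigFile(path)
	return v, v.ReadInConfig()
}

// config returns the current snapshot; callers should not cache it across reloads.
func (c *cfgHolder) config() *viper.Viper {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.settings
}

// reload rebuilds the instance and atomically replaces the old one,
// so keys dropped from the file revert to their defaults.
func (c *cfgHolder) reload() error {
	v, err := newViper(c.path)
	if err != nil {
		return err
	}
	c.mu.Lock()
	c.settings = v
	c.mu.Unlock()
	return nil
}

func main() {
	f, err := os.CreateTemp("", "cfg-*.yaml")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// Start with a file that overrides the default.
	os.WriteFile(f.Name(), []byte("listen_address: 127.0.0.1:9090\n"), 0o600)
	v, err := newViper(f.Name())
	if err != nil {
		panic(err)
	}
	cfg := &cfgHolder{path: f.Name(), settings: v}
	fmt.Println("before:", cfg.config().GetString("listen_address")) // 127.0.0.1:9090

	// Drop the parameter from the file, then wait for SIGHUP and reload.
	os.WriteFile(f.Name(), []byte("{}\n"), 0o600)
	hup := make(chan os.Signal, 1)
	signal.Notify(hup, syscall.SIGHUP)
	fmt.Println("send SIGHUP to this process to reload the config")
	<-hup

	if err := cfg.reload(); err != nil {
		panic(err)
	}
	fmt.Println("after:", cfg.config().GetString("listen_address")) // 0.0.0.0:8080
}

Sending SIGHUP to the running process drives the reload; the second print shows the default rather than the stale override. The diff itself follows.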
@@ -64,7 +64,7 @@ type (
 	App struct {
 		ctr s3middleware.Center
 		log *zap.Logger
-		cfg *viper.Viper
+		cfg *appCfg
 		pool *pool.Pool
 		treePool *treepool.Pool
 		key *keys.PrivateKey
@@ -132,14 +132,14 @@ type (
 	}
 )

-func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
-	settings := newAppSettings(log, v)
+func newApp(ctx context.Context, log *Logger, cfg *appCfg) *App {
+	settings := newAppSettings(log, cfg.config())

-	objPool, treePool, key := getPools(ctx, log.logger, v, settings.dialerSource)
+	objPool, treePool, key := getPools(ctx, log.logger, cfg.config(), settings.dialerSource)

 	app := &App{
 		log: log.logger,
-		cfg: v,
+		cfg: cfg,
 		pool: objPool,
 		treePool: treePool,
 		key: key,
@@ -168,7 +168,7 @@ func (a *App) init(ctx context.Context) {
 }

 func (a *App) initAuthCenter(ctx context.Context) {
-	if a.cfg.IsSet(cfgContainersAccessBox) {
+	if a.cfg.config().IsSet(cfgContainersAccessBox) {
 		cnrID, err := a.resolveContainerID(ctx, cfgContainersAccessBox)
 		if err != nil {
 			a.log.Fatal(logs.CouldNotFetchAccessBoxContainerInfo, zap.Error(err))
@@ -179,11 +179,11 @@ func (a *App) initAuthCenter(ctx context.Context) {
 	cfg := tokens.Config{
 		FrostFS: frostfs.NewAuthmateFrostFS(frostfs.NewFrostFS(a.pool, a.key), a.log),
 		Key: a.key,
-		CacheConfig: getAccessBoxCacheConfig(a.cfg, a.log),
-		RemovingCheckAfterDurations: fetchRemovingCheckInterval(a.cfg, a.log),
+		CacheConfig: getAccessBoxCacheConfig(a.cfg.config(), a.log),
+		RemovingCheckAfterDurations: fetchRemovingCheckInterval(a.cfg.config(), a.log),
 	}

-	a.ctr = auth.New(tokens.New(cfg), a.cfg.GetStringSlice(cfgAllowedAccessKeyIDPrefixes), a.settings)
+	a.ctr = auth.New(tokens.New(cfg), a.cfg.config().GetStringSlice(cfgAllowedAccessKeyIDPrefixes), a.settings)
 }

 func (a *App) initLayer(ctx context.Context) {
@@ -197,7 +197,7 @@ func (a *App) initLayer(ctx context.Context) {
 	user.IDFromKey(&gateOwner, a.key.PrivateKey.PublicKey)

 	var corsCnrInfo *data.BucketInfo
-	if a.cfg.IsSet(cfgContainersCORS) {
+	if a.cfg.config().IsSet(cfgContainersCORS) {
 		corsCnrInfo, err = a.fetchContainerInfo(ctx, cfgContainersCORS)
 		if err != nil {
 			a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err))
@@ -205,7 +205,7 @@ func (a *App) initLayer(ctx context.Context) {
 	}

 	var lifecycleCnrInfo *data.BucketInfo
-	if a.cfg.IsSet(cfgContainersLifecycle) {
+	if a.cfg.config().IsSet(cfgContainersLifecycle) {
 		lifecycleCnrInfo, err = a.fetchContainerInfo(ctx, cfgContainersLifecycle)
 		if err != nil {
 			a.log.Fatal(logs.CouldNotFetchLifecycleContainerInfo, zap.Error(err))
@@ -213,7 +213,7 @@ func (a *App) initLayer(ctx context.Context) {
 	}

 	layerCfg := &layer.Config{
-		Cache: layer.NewCache(getCacheOptions(a.cfg, a.log)),
+		Cache: layer.NewCache(getCacheOptions(a.cfg.config(), a.log)),
 		AnonKey: layer.AnonymousKey{
 			Key: randomKey,
 		},
@@ -516,7 +516,7 @@ func (a *App) initMetrics() {
 		Logger: a.log,
 		PoolStatistics: frostfs.NewPoolStatistic(a.pool),
 		TreeStatistic: a.treePool,
-		Enabled: a.cfg.GetBool(cfgPrometheusEnabled),
+		Enabled: a.cfg.config().GetBool(cfgPrometheusEnabled),
 	}

 	a.metrics = metrics.NewAppMetrics(cfg)
@@ -525,9 +525,9 @@ func (a *App) initMetrics() {

 func (a *App) initFrostfsID(ctx context.Context) {
 	cli, err := ffidcontract.New(ctx, ffidcontract.Config{
-		RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
-		Contract: a.cfg.GetString(cfgFrostfsIDContract),
-		ProxyContract: a.cfg.GetString(cfgProxyContract),
+		RPCAddress: a.cfg.config().GetString(cfgRPCEndpoint),
+		Contract: a.cfg.config().GetString(cfgFrostfsIDContract),
+		ProxyContract: a.cfg.config().GetString(cfgProxyContract),
 		Key: a.key,
 		Waiter: commonclient.WaiterOptions{
 			IgnoreAlreadyExistsError: false,
@@ -539,7 +539,7 @@ func (a *App) initFrostfsID(ctx context.Context) {
 	}

 	a.frostfsid, err = frostfsid.NewFrostFSID(frostfsid.Config{
-		Cache: cache.NewFrostfsIDCache(getFrostfsIDCacheConfig(a.cfg, a.log)),
+		Cache: cache.NewFrostfsIDCache(getFrostfsIDCacheConfig(a.cfg.config(), a.log)),
 		FrostFSID: cli,
 		Logger: a.log,
 	})
@@ -550,9 +550,9 @@ func (a *App) initFrostfsID(ctx context.Context) {

 func (a *App) initPolicyStorage(ctx context.Context) {
 	policyContract, err := contract.New(ctx, contract.Config{
-		RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
-		Contract: a.cfg.GetString(cfgPolicyContract),
-		ProxyContract: a.cfg.GetString(cfgProxyContract),
+		RPCAddress: a.cfg.config().GetString(cfgRPCEndpoint),
+		Contract: a.cfg.config().GetString(cfgPolicyContract),
+		ProxyContract: a.cfg.config().GetString(cfgProxyContract),
 		Key: a.key,
 		Waiter: commonclient.WaiterOptions{
 			IgnoreAlreadyExistsError: false,
@@ -565,7 +565,7 @@ func (a *App) initPolicyStorage(ctx context.Context) {

 	a.policyStorage = policy.NewStorage(policy.StorageConfig{
 		Contract: policyContract,
-		Cache: cache.NewMorphPolicyCache(getMorphPolicyCacheConfig(a.cfg, a.log)),
+		Cache: cache.NewMorphPolicyCache(getMorphPolicyCacheConfig(a.cfg.config(), a.log)),
 		Log: a.log,
 	})
 }
@@ -581,13 +581,13 @@ func (a *App) initResolver() {
 func (a *App) getResolverConfig() *resolver.Config {
 	return &resolver.Config{
 		FrostFS: frostfs.NewResolverFrostFS(a.pool),
-		RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
+		RPCAddress: a.cfg.config().GetString(cfgRPCEndpoint),
 	}
 }

 func (a *App) getResolverOrder() []string {
-	order := a.cfg.GetStringSlice(cfgResolveOrder)
-	if a.cfg.GetString(cfgRPCEndpoint) == "" {
+	order := a.cfg.config().GetStringSlice(cfgResolveOrder)
+	if a.cfg.config().GetString(cfgRPCEndpoint) == "" {
 		order = remove(order, resolver.NNSResolver)
 		a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
 	}
@@ -605,15 +605,15 @@ func (a *App) initTracing(ctx context.Context) {
 		instanceID = a.servers[0].Address()
 	}
 	cfg := tracing.Config{
-		Enabled: a.cfg.GetBool(cfgTracingEnabled),
-		Exporter: tracing.Exporter(a.cfg.GetString(cfgTracingExporter)),
-		Endpoint: a.cfg.GetString(cfgTracingEndpoint),
+		Enabled: a.cfg.config().GetBool(cfgTracingEnabled),
+		Exporter: tracing.Exporter(a.cfg.config().GetString(cfgTracingExporter)),
+		Endpoint: a.cfg.config().GetString(cfgTracingEndpoint),
 		Service: "frostfs-s3-gw",
 		InstanceID: instanceID,
 		Version: version.Version,
 	}

-	if trustedCa := a.cfg.GetString(cfgTracingTrustedCa); trustedCa != "" {
+	if trustedCa := a.cfg.config().GetString(cfgTracingTrustedCa); trustedCa != "" {
 		caBytes, err := os.ReadFile(trustedCa)
 		if err != nil {
 			a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
@@ -628,7 +628,7 @@ func (a *App) initTracing(ctx context.Context) {
 		cfg.ServerCaCertPool = certPool
 	}

-	attributes, err := fetchTracingAttributes(a.cfg)
+	attributes, err := fetchTracingAttributes(a.cfg.config())
 	if err != nil {
 		a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
 		return
@@ -805,10 +805,10 @@ func (a *App) Serve(ctx context.Context) {
 	srv := new(http.Server)
 	srv.Handler = chiRouter
 	srv.ErrorLog = zap.NewStdLog(a.log)
-	srv.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
-	srv.ReadHeaderTimeout = a.cfg.GetDuration(cfgWebReadHeaderTimeout)
-	srv.WriteTimeout = a.cfg.GetDuration(cfgWebWriteTimeout)
-	srv.IdleTimeout = a.cfg.GetDuration(cfgWebIdleTimeout)
+	srv.ReadTimeout = a.cfg.config().GetDuration(cfgWebReadTimeout)
+	srv.ReadHeaderTimeout = a.cfg.config().GetDuration(cfgWebReadHeaderTimeout)
+	srv.WriteTimeout = a.cfg.config().GetDuration(cfgWebWriteTimeout)
+	srv.IdleTimeout = a.cfg.config().GetDuration(cfgWebIdleTimeout)

 	a.startServices()

@@ -861,11 +861,11 @@ func shutdownContext() (context.Context, context.CancelFunc) {
 func (a *App) configReload(ctx context.Context) {
 	a.log.Info(logs.SIGHUPConfigReloadStarted)

-	if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
+	if !a.cfg.config().IsSet(cmdConfig) && !a.cfg.config().IsSet(cmdConfigDir) {
 		a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
 		return
 	}
-	if err := readInConfig(a.cfg); err != nil {
+	if err := a.cfg.reload(); err != nil {
 		a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
 		return
 	}
@@ -885,7 +885,7 @@ func (a *App) configReload(ctx context.Context) {

 	a.updateSettings()

-	a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
+	a.metrics.SetEnabled(a.cfg.config().GetBool(cfgPrometheusEnabled))
 	a.initTracing(ctx)
 	a.setHealthStatus()

@@ -893,33 +893,33 @@ func (a *App) configReload(ctx context.Context) {
 }

 func (a *App) updateSettings() {
-	if lvl, err := getLogLevel(a.cfg); err != nil {
+	if lvl, err := getLogLevel(a.cfg.config()); err != nil {
 		a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
 	} else {
 		a.settings.logLevel.SetLevel(lvl)
 	}

-	if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.cfg, a.log)); err != nil {
+	if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.cfg.config(), a.log)); err != nil {
 		a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err))
 	}

-	a.settings.update(a.cfg, a.log)
+	a.settings.update(a.cfg.config(), a.log)
 }

 func (a *App) startServices() {
 	a.services = a.services[:0]

-	pprofService := NewPprofService(a.cfg, a.log)
+	pprofService := NewPprofService(a.cfg.config(), a.log)
 	a.services = append(a.services, pprofService)
 	go pprofService.Start()

-	prometheusService := NewPrometheusService(a.cfg, a.log, a.metrics.Handler())
+	prometheusService := NewPrometheusService(a.cfg.config(), a.log, a.metrics.Handler())
 	a.services = append(a.services, prometheusService)
 	go prometheusService.Start()
 }

 func (a *App) initServers(ctx context.Context) {
-	serversInfo := fetchServers(a.cfg, a.log)
+	serversInfo := fetchServers(a.cfg.config(), a.log)

 	a.servers = make([]Server, 0, len(serversInfo))
 	for _, serverInfo := range serversInfo {
@@ -946,7 +946,7 @@ func (a *App) initServers(ctx context.Context) {
 }

 func (a *App) updateServers() error {
-	serversInfo := fetchServers(a.cfg, a.log)
+	serversInfo := fetchServers(a.cfg.config(), a.log)

 	a.mu.Lock()
 	defer a.mu.Unlock()
@@ -1079,7 +1079,7 @@ func (a *App) setRuntimeParameters() {
 		return
 	}

-	softMemoryLimit := fetchSoftMemoryLimit(a.cfg)
+	softMemoryLimit := fetchSoftMemoryLimit(a.cfg.config())
 	previous := debug.SetMemoryLimit(softMemoryLimit)
 	if softMemoryLimit != previous {
 		a.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
@@ -1155,7 +1155,7 @@ func (a *App) fetchContainerInfo(ctx context.Context, cfgKey string) (info *data
 }

 func (a *App) resolveContainerID(ctx context.Context, cfgKey string) (cid.ID, error) {
-	containerString := a.cfg.GetString(cfgKey)
+	containerString := a.cfg.config().GetString(cfgKey)

 	var id cid.ID
 	if err := id.DecodeString(containerString); err != nil {

@@ -10,6 +10,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
@@ -286,6 +287,49 @@ var ignore = map[string]struct{}{
 	cmdVersion: {},
 }

+type appCfg struct {
+	flags *pflag.FlagSet
+
+	mu sync.RWMutex
+	settings *viper.Viper
+}
+
+func (a *appCfg) reload() error {
+	old := a.config()
+
+	v, err := newViper(a.flags)
+	if err != nil {
+		return err
+	}
+
+	if old.IsSet(cmdConfig) {
+		v.Set(cmdConfig, old.Get(cmdConfig))
+	}
+	if old.IsSet(cmdConfigDir) {
+		v.Set(cmdConfigDir, old.Get(cmdConfigDir))
+	}
+
+	if err = readInConfig(v); err != nil {
+		return err
+	}
+
+	a.setConfig(v)
+	return nil
+}
+
+func (a *appCfg) config() *viper.Viper {
+	a.mu.RLock()
+	defer a.mu.RUnlock()
+
+	return a.settings
+}
+
+func (a *appCfg) setConfig(v *viper.Viper) {
+	a.mu.Lock()
+	a.settings = v
+	a.mu.Unlock()
+}
+
 func fetchConnectTimeout(cfg *viper.Viper) time.Duration {
 	connTimeout := cfg.GetDuration(cfgConnectTimeout)
 	if connTimeout <= 0 {
@@ -804,7 +848,7 @@ func fetchTracingAttributes(v *viper.Viper) (map[string]string, error) {
 	return attributes, nil
 }

-func newSettings() *viper.Viper {
+func newViper(flags *pflag.FlagSet) (*viper.Viper, error) {
 	v := viper.New()

 	v.AutomaticEnv()
@@ -813,6 +857,16 @@ func newSettings() *viper.Viper {
 	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
 	v.AllowEmptyEnv(true)

+	setDefaults(v, flags)
+
+	if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
+		v.Set(cfgServer+".0."+cfgTLSEnabled, true)
+	}
+
+	return v, bindFlags(v, flags)
+}
+
+func newSettings() *appCfg {
 	// flags setup:
 	flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
 	flags.SetOutput(os.Stdout)
@@ -840,15 +894,71 @@ func newSettings() *viper.Viper {
 	flags.String(cfgTLSCertFile, "", "TLS certificate file to use")
 	flags.String(cfgTLSKeyFile, "", "TLS key file to use")

-	peers := flags.StringArrayP(cfgPeers, "p", nil, "set FrostFS nodes")
+	flags.StringArrayP(cfgPeers, "p", nil, "set FrostFS nodes")

 	flags.StringP(cfgRPCEndpoint, "r", "", "set RPC endpoint")
-	resolveMethods := flags.StringSlice(cfgResolveOrder, []string{resolver.DNSResolver}, "set bucket name resolve order")
+	flags.StringSlice(cfgResolveOrder, []string{resolver.DNSResolver}, "set bucket name resolve order")

-	domains := flags.StringSliceP(cfgListenDomains, "d", nil, "set domains to be listened")
+	flags.StringSliceP(cfgListenDomains, "d", nil, "set domains to be listened")

-	// set defaults:
+	if err := flags.Parse(os.Args); err != nil {
+		panic(err)
+	}

+	v, err := newViper(flags)
+	if err != nil {
+		panic(fmt.Errorf("bind flags: %w", err))
+	}
+
+	switch {
+	case help != nil && *help:
+		fmt.Printf("FrostFS S3 gateway %s\n", version.Version)
+		flags.PrintDefaults()
+
+		fmt.Println()
+		fmt.Println("Default environments:")
+		fmt.Println()
+		keys := v.AllKeys()
+		sort.Strings(keys)
+
+		for i := range keys {
+			if _, ok := ignore[keys[i]]; ok {
+				continue
+			}
+
+			defaultValue := v.GetString(keys[i])
+			if len(defaultValue) == 0 {
+				continue
+			}
+
+			k := strings.Replace(keys[i], ".", "_", -1)
+			fmt.Printf("%s_%s = %s\n", envPrefix, strings.ToUpper(k), defaultValue)
+		}
+
+		fmt.Println()
+		fmt.Println("Peers preset:")
+		fmt.Println()
+
+		fmt.Printf("%s_%s_[N]_ADDRESS = string\n", envPrefix, strings.ToUpper(cfgPeers))
+		fmt.Printf("%s_%s_[N]_WEIGHT = 0..1 (float)\n", envPrefix, strings.ToUpper(cfgPeers))
+
+		os.Exit(0)
+	case versionFlag != nil && *versionFlag:
+		fmt.Printf("FrostFS S3 Gateway\nVersion: %s\nGoVersion: %s\n", version.Version, runtime.Version())
+		os.Exit(0)
+	}
+
+	if err = readInConfig(v); err != nil {
+		panic(err)
+	}
+
+	return &appCfg{
+		flags: flags,
+		settings: v,
+	}
+}
+
+func setDefaults(v *viper.Viper, flags *pflag.FlagSet) {
 	v.SetDefault(cfgAccessBoxCacheRemovingCheckInterval, defaultAccessBoxCacheRemovingCheckInterval)

 	// logger:
@@ -909,78 +1019,21 @@ func newSettings() *viper.Viper {
 	// multinet
 	v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay)

-	// Bind flags
-	if err := bindFlags(v, flags); err != nil {
-		panic(fmt.Errorf("bind flags: %w", err))
+	if resolveMethods, err := flags.GetStringSlice(cfgResolveOrder); err == nil {
+		v.SetDefault(cfgResolveOrder, resolveMethods)
 	}

-	if err := flags.Parse(os.Args); err != nil {
-		panic(err)
-	}
-
-	if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
-		v.Set(cfgServer+".0."+cfgTLSEnabled, true)
-	}
-
-	if resolveMethods != nil {
-		v.SetDefault(cfgResolveOrder, *resolveMethods)
-	}
-
-	if peers != nil && len(*peers) > 0 {
-		for i := range *peers {
-			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", (*peers)[i])
+	if peers, err := flags.GetStringArray(cfgPeers); err == nil {
+		for i := range peers {
+			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", peers[i])
 			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1)
 			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1)
 		}
 	}

-	if domains != nil && len(*domains) > 0 {
-		v.SetDefault(cfgListenDomains, *domains)
+	if domains, err := flags.GetStringSlice(cfgListenDomains); err == nil && len(domains) > 0 {
+		v.SetDefault(cfgListenDomains, domains)
 	}
-
-	switch {
-	case help != nil && *help:
-		fmt.Printf("FrostFS S3 gateway %s\n", version.Version)
-		flags.PrintDefaults()
-
-		fmt.Println()
-		fmt.Println("Default environments:")
-		fmt.Println()
-		keys := v.AllKeys()
-		sort.Strings(keys)
-
-		for i := range keys {
-			if _, ok := ignore[keys[i]]; ok {
-				continue
-			}
-
-			defaultValue := v.GetString(keys[i])
-			if len(defaultValue) == 0 {
-				continue
-			}
-
-			k := strings.Replace(keys[i], ".", "_", -1)
-			fmt.Printf("%s_%s = %s\n", envPrefix, strings.ToUpper(k), defaultValue)
-		}
-
-		fmt.Println()
-		fmt.Println("Peers preset:")
-		fmt.Println()
-
-		fmt.Printf("%s_%s_[N]_ADDRESS = string\n", envPrefix, strings.ToUpper(cfgPeers))
-		fmt.Printf("%s_%s_[N]_WEIGHT = 0..1 (float)\n", envPrefix, strings.ToUpper(cfgPeers))
-
-		os.Exit(0)
-	case versionFlag != nil && *versionFlag:
-		fmt.Printf("FrostFS S3 Gateway\nVersion: %s\nGoVersion: %s\n", version.Version, runtime.Version())
-		os.Exit(0)
-	}
-
-	if err := readInConfig(v); err != nil {
-		panic(err)
-	}
-
-	return v
 }

 func bindFlags(v *viper.Viper, flags *pflag.FlagSet) error {

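In the settings file, the defaults that used to be applied inline inside newSettings move into setDefaults(v, flags), which reads values back from the already-parsed pflag.FlagSet (GetStringSlice, GetStringArray) rather than keeping the pointers returned when the flags were defined. Every viper instance produced by newViper, including those built during a SIGHUP reload, therefore carries the same flag-driven defaults. A rough, hedged sketch of that idea, with invented flag and key names rather than the gateway's, could look like this:

package main

import (
	"fmt"
	"strconv"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

// setDefaultsFromFlags mirrors the idea of deriving viper defaults from a
// parsed flag set; the flag names here are illustrative only.
func setDefaultsFromFlags(v *viper.Viper, flags *pflag.FlagSet) {
	if order, err := flags.GetStringSlice("resolve_order"); err == nil {
		v.SetDefault("resolve_order", order)
	}
	if peers, err := flags.GetStringArray("peers"); err == nil {
		for i := range peers {
			v.SetDefault("peers."+strconv.Itoa(i)+".address", peers[i])
			v.SetDefault("peers."+strconv.Itoa(i)+".weight", 1)
		}
	}
}

func main() {
	flags := pflag.NewFlagSet("example", pflag.ExitOnError)
	flags.StringSlice("resolve_order", []string{"dns"}, "bucket name resolve order")
	flags.StringArrayP("peers", "p", nil, "storage nodes")
	_ = flags.Parse([]string{"-p", "node1:8080", "-p", "node2:8080"})

	// Any instance built after parsing gets the same defaults, so a reload
	// that constructs a fresh viper does not lose flag-provided values.
	v := viper.New()
	setDefaultsFromFlags(v, flags)
	fmt.Println(v.GetStringSlice("resolve_order")) // [dns]
	fmt.Println(v.GetString("peers.0.address"))    // node1:8080
	fmt.Println(v.GetInt("peers.1.weight"))        // 1
}

The remaining diff wires the new constructor into main.
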
@@ -9,10 +9,10 @@ import (
 func main() {
 	g, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)

-	v := newSettings()
-	l := pickLogger(v)
+	cfg := newSettings()
+	l := pickLogger(cfg.config())

-	a := newApp(g, l, v)
+	a := newApp(g, l, cfg)

 	go a.Serve(g)