Compare commits

No commits in common. "v0.32.8" and "v0.32.7" have entirely different histories.

10 changed files with 16 additions and 58 deletions

@@ -16,7 +16,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23.6'
+          go-version: '1.23'
       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest

@@ -4,23 +4,15 @@ This document outlines major changes between releases.
 ## [Unreleased]
-## [0.32.8] - 2025-02-11
-### Fixed
-- Return 404 instead of 500 when object is missing in object storage and available in the tree (#626)
-### Added
-- `tree_stream_timeout` configuration parameter (#627)
 ## [0.32.7] - 2025-02-06
 ### Fixed
-- Correct passing copies number during multipart upload (#623)
+- Corrct passing copies number during multipart upload (#623)
 ## [0.32.6] - 2025-02-05
 ### Fixed
-- Connection leak when `feature.tree_pool_netmap_support` is enabled (#622)
+- Connection leak when `feature.tree_pool_netmap_support` is enabled.
 ## [0.32.5] - 2025-02-04
@@ -444,6 +436,5 @@ To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs
 [0.32.4]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.3...v0.32.4
 [0.32.5]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.4...v0.32.5
 [0.32.6]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.5...v0.32.6
-[0.32.7]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.6...v0.32.7
-[0.32.8]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.7...v0.32.8
-[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.8...master
+[0.32.6]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.6...v0.32.7
+[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.32.7...master

@@ -1 +1 @@
-v0.32.8
+v0.32.7

@@ -197,33 +197,6 @@ func TestGetObject(t *testing.T) {
 	getObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey)
 }
-func TestGetDeletedObject(t *testing.T) {
-	hc := prepareHandlerContextWithMinCache(t)
-	bktName, objName := "bucket", "obj"
-	bktInfo, objInfo := createVersionedBucketAndObject(hc.t, hc, bktName, objName)
-	putObject(hc, bktName, objName)
-	checkFound(hc.t, hc, bktName, objName, objInfo.VersionID())
-	checkFound(hc.t, hc, bktName, objName, emptyVersion)
-	addr := getAddressOfLastVersion(hc, bktInfo, objName)
-	t.Run("not found error", func(_ *testing.T) {
-		hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
-		hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})
-		getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), apierr.ErrNoSuchVersion)
-		getObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey)
-	})
-	t.Run("already removed error", func(_ *testing.T) {
-		hc.tp.SetObjectError(addr, &apistatus.ObjectAlreadyRemoved{})
-		hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectAlreadyRemoved{})
-		getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), apierr.ErrNoSuchVersion)
-		getObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey)
-	})
-}
 func TestGetObjectEnabledMD5(t *testing.T) {
 	hc := prepareHandlerContext(t)
 	bktName, objName := "bucket", "obj"

@@ -410,7 +410,7 @@ func (n *Layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.Bucke
 	meta, err := n.objectHead(ctx, bkt, node.OID)
 	if err != nil {
-		if client.IsErrObjectNotFound(err) || client.IsErrObjectAlreadyRemoved(err) {
+		if client.IsErrObjectNotFound(err) {
 			return nil, fmt.Errorf("%w: %s; %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error(), node.OID.EncodeToString())
 		}
 		return nil, err
@@ -467,7 +467,7 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
 	meta, err := n.objectHead(ctx, bkt, foundVersion.OID)
 	if err != nil {
-		if client.IsErrObjectNotFound(err) || client.IsErrObjectAlreadyRemoved(err) {
+		if client.IsErrObjectNotFound(err) {
 			return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchVersion), err.Error())
 		}
 		return nil, err
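
The two hunks above line up with the 0.32.8 changelog entry "Return 404 instead of 500 when object is missing in object storage and available in the tree (#626)": on the v0.32.8 side, an object that storage reports as already removed is mapped to the same S3-level error as a missing object rather than surfacing as an internal error. Below is a minimal, self-contained sketch of that mapping; it uses stand-in sentinel errors instead of the SDK status types (apistatus.ObjectNotFound, apistatus.ObjectAlreadyRemoved) and the gateway's apierr helpers, so the names and types are illustrative only, not the gateway's actual code.

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the SDK status errors and the S3 API error; illustrative only.
var (
	errObjectNotFound       = errors.New("object not found")
	errObjectAlreadyRemoved = errors.New("object already removed")
	errNoSuchKey            = errors.New("NoSuchKey")
)

// mapHeadError mirrors the removed (v0.32.8) branch above: both storage errors are
// wrapped into the S3 NoSuchKey error, which the handler then serves as HTTP 404
// instead of letting the raw error bubble up as a 500.
func mapHeadError(err error) error {
	if errors.Is(err, errObjectNotFound) || errors.Is(err, errObjectAlreadyRemoved) {
		return fmt.Errorf("%w: %s", errNoSuchKey, err)
	}
	return err
}

func main() {
	fmt.Println(mapHeadError(errObjectAlreadyRemoved)) // NoSuchKey: object already removed
}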

@@ -779,8 +779,9 @@ func (a *App) initPools(ctx context.Context) {
 	prm.SetNodeDialTimeout(connTimeout)
 	prmTree.SetNodeDialTimeout(connTimeout)
-	prm.SetNodeStreamTimeout(fetchStreamTimeout(a.cfg, cfgStreamTimeout))
-	prmTree.SetNodeStreamTimeout(fetchStreamTimeout(a.cfg, cfgTreeStreamTimeout))
+	streamTimeout := fetchStreamTimeout(a.cfg)
+	prm.SetNodeStreamTimeout(streamTimeout)
+	prmTree.SetNodeStreamTimeout(streamTimeout)
 	healthCheckTimeout := fetchHealthCheckTimeout(a.cfg)
 	prm.SetHealthcheckTimeout(healthCheckTimeout)

@@ -122,7 +122,6 @@ const ( // Settings.
 	// Pool config.
 	cfgConnectTimeout = "connect_timeout"
 	cfgStreamTimeout = "stream_timeout"
-	cfgTreeStreamTimeout = "tree_stream_timeout"
 	cfgHealthcheckTimeout = "healthcheck_timeout"
 	cfgRebalanceInterval = "rebalance_interval"
 	cfgPoolErrorThreshold = "pool_error_threshold"
@@ -320,8 +319,8 @@ func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
 	return reconnect
 }
-func fetchStreamTimeout(cfg *viper.Viper, cfgEntry string) time.Duration {
-	streamTimeout := cfg.GetDuration(cfgEntry)
+func fetchStreamTimeout(cfg *viper.Viper) time.Duration {
+	streamTimeout := cfg.GetDuration(cfgStreamTimeout)
 	if streamTimeout <= 0 {
 		streamTimeout = defaultStreamTimeout
 	}
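
The helper in the hunk above is cut off before its return statement; here is a minimal, self-contained sketch of the single-timeout variant as it reads on the v0.32.7 side. It assumes github.com/spf13/viper, inlines the "stream_timeout" key in place of the cfgStreamTimeout constant, and uses the 10s default documented in the configuration table further down, so it illustrates the pattern rather than reproducing the gateway's exact code.

package main

import (
	"fmt"
	"time"

	"github.com/spf13/viper"
)

// Assumed fallback; 10s is the stream_timeout default listed in the docs table below.
const defaultStreamTimeout = 10 * time.Second

// fetchStreamTimeout reads the configured duration and falls back to the default
// when the key is unset or non-positive, matching the branch shown in the hunk above.
func fetchStreamTimeout(cfg *viper.Viper) time.Duration {
	streamTimeout := cfg.GetDuration("stream_timeout")
	if streamTimeout <= 0 {
		streamTimeout = defaultStreamTimeout
	}
	return streamTimeout
}

func main() {
	cfg := viper.New()
	// No stream_timeout is set here, so the default applies.
	fmt.Println(fetchStreamTimeout(cfg)) // 10s
}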

@@ -80,10 +80,8 @@ S3_GW_PROMETHEUS_ADDRESS=localhost:8086
 # Timeout to connect to a node
 S3_GW_CONNECT_TIMEOUT=10s
-# Timeout for individual operations in object pool streaming RPC.
+# Timeout for individual operations in streaming RPC.
 S3_GW_STREAM_TIMEOUT=10s
-# Timeout for individual operations in tree pool streaming RPC.
-S3_GW_TREE_STREAM_TIMEOUT=10s
 # Timeout to check node health during rebalance.
 S3_GW_HEALTHCHECK_TIMEOUT=15s
 # Interval to check node health

@@ -100,10 +100,8 @@ tracing:
 # Timeout to connect to a node
 connect_timeout: 10s
-# Timeout for individual operations in object pool streaming RPC.
+# Timeout for individual operations in streaming RPC.
 stream_timeout: 10s
-# Timeout for individual operations in tree pool streaming RPC.
-tree_stream_timeout: 10s
 # Timeout to check node health during rebalance
 healthcheck_timeout: 15s
 # Interval to check node health

@@ -213,7 +213,6 @@ resolve_order:
 connect_timeout: 10s
 stream_timeout: 10s
-tree_stream_timeout: 10s
 healthcheck_timeout: 15s
 rebalance_interval: 60s
 pool_error_threshold: 100
@@ -236,8 +235,7 @@ source_ip_header: "Source-Ip"
 | `rpc_endpoint` | `string` | no | | The address of the RPC host to which the gateway connects to resolve bucket names and interact with frostfs contracts (required to use the `nns` resolver and `frostfsid` contract). |
 | `resolve_order` | `[]string` | yes | `[dns]` | Order of bucket name resolvers to use. Available resolvers: `dns`, `nns`. |
 | `connect_timeout` | `duration` | no | `10s` | Timeout to connect to a node. |
-| `stream_timeout` | `duration` | no | `10s` | Timeout for individual operations in object pool streaming RPC. |
-| `tree_stream_timeout` | `duration` | no | `10s` | Timeout for individual operations in tree pool streaming RPC. |
+| `stream_timeout` | `duration` | no | `10s` | Timeout for individual operations in streaming RPC. |
 | `healthcheck_timeout` | `duration` | no | `15s` | Timeout to check node health during rebalance. |
 | `rebalance_interval` | `duration` | no | `60s` | Interval to check node health. |
 | `pool_error_threshold` | `uint32` | no | `100` | The number of errors on connection after which node is considered as unhealthy. |