package getsvc

import (
    "context"

    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    "go.uber.org/zap"
)

// Get serves a request to get an object by address.
func (s *Service) Get(ctx context.Context, prm Prm) error {
    return s.get(ctx, RequestParameters{
        commonPrm: prm.commonPrm,
    })
}

// GetRange serves a request to get an object payload range by address.
func (s *Service) GetRange(ctx context.Context, prm RangePrm) error {
    return s.get(ctx, RequestParameters{
        commonPrm: prm.commonPrm,
        rng:       prm.rng,
    })
}

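// GetRangeHash serves a request to get hashes of the requested payload ranges
// of an object. Each range is hashed independently with the hash generator and
// salt supplied in prm.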
func (s *Service) GetRangeHash(ctx context.Context, prm RangeHashPrm) (*RangeHashRes, error) {
    hashes := make([][]byte, 0, len(prm.rngs))

    for _, rng := range prm.rngs {
        h := prm.hashGen()

        // For big ranges we could fetch range-hashes from different nodes and concatenate them locally.
        // However,
        // 1. Potential gains are insignificant when operating over the Internet, given typical latencies and losses.
        // 2. A parallel solution is more complex in terms of code.
        // 3. TZ-hash is likely to be disabled in private installations.
        reqPrm := RequestParameters{
            commonPrm: prm.commonPrm,
            rng:       &rng,
        }
        reqPrm.SetChunkWriter(&hasherWrapper{
            hash: util.NewSaltingWriter(h, prm.salt),
        })

        if err := s.get(ctx, reqPrm); err != nil {
            return nil, err
        }

        hashes = append(hashes, h.Sum(nil))
    }

    return &RangeHashRes{
        hashes: hashes,
    }, nil
}

// Head reads the object header from the container.
//
// Returns ErrNotFound if the header was not received for the call.
// Returns SplitInfoError if the object is virtual and the raw flag is set.
func (s *Service) Head(ctx context.Context, prm HeadPrm) error {
    return s.get(ctx, RequestParameters{
        head:      true,
        commonPrm: prm.commonPrm,
    })
}

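// get builds a request from the given parameters and executes it: first against
// the local storage, then (via analyzeStatus) against other container nodes if
// the local attempt did not succeed.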
func (s *Service) get(ctx context.Context, prm RequestParameters) error {
    exec := &request{
        keyStore:                 s.keyStore,
        traverserGenerator:       s.traverserGenerator,
        remoteStorageConstructor: s.remoteStorageConstructor,
        epochSource:              s.epochSource,
        localStorage:             s.localStorage,

        prm:       prm,
        infoSplit: object.NewSplitInfo(),
    }

    exec.setLogger(s.log)

    exec.execute(ctx)

    return exec.statusError.err
}

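// execute performs the local part of the request and then analyzes its status,
// allowing a single fallback to the remaining container nodes.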
func (exec *request) execute(ctx context.Context) {
    exec.log.Debug(logs.ServingRequest)

    // perform local operation
    exec.executeLocal(ctx)

    exec.analyzeStatus(ctx, true)
}

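// analyzeStatus inspects the result of the previous step: a virtual object
// triggers assembly from its parts, while an unclassified error triggers a
// retry on the rest of the container nodes when execCnr is true.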
func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) {
    // analyze local result
    switch exec.status {
    case statusOK:
        exec.log.Debug(logs.OperationFinishedSuccessfully)
    case statusINHUMED:
        exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved)
    case statusVIRTUAL:
        exec.log.Debug(logs.GetRequestedObjectIsVirtual)
        exec.assemble(ctx)
    case statusOutOfRange:
        exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds)
    default:
        exec.log.Debug(logs.OperationFinishedWithError,
            zap.String("error", exec.err.Error()),
        )

        if execCnr {
            exec.executeOnContainer(ctx)
            exec.analyzeStatus(ctx, false)
        }
    }
}