package client

import (
	"context"
	"crypto/ecdsa"
	"errors"
	"fmt"
	"io"

	v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
	rpcapi "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

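// objectPutInitRaw opens a PutObject RPC stream and returns a writer that
// signs and sends requests over it. A per-call key from prm, if set,
// overrides the client's default key.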
func (c *Client) objectPutInitRaw(ctx context.Context, prm PrmObjectPutInit) (*objectWriterRaw, error) {
	var w objectWriterRaw
	stream, err := rpcapi.PutObject(&c.c, &w.respV2, client.WithContext(ctx))
	if err != nil {
		return nil, fmt.Errorf("open stream: %w", err)
	}

	w.key = &c.prm.key
	if prm.key != nil {
		w.key = prm.key
	}
	w.client = c
	w.stream = stream
	w.partInit.SetCopiesNumber(prm.copyNum)
	w.req.SetBody(new(v2object.PutRequestBody))
	if prm.maxChunkLen > 0 {
		w.maxChunkLen = prm.maxChunkLen
	} else {
		w.maxChunkLen = defaultGRPCPayloadChunkLen
	}
	c.prepareRequest(&w.req, &prm.meta)
	return &w, nil
}

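// objectWriterRaw is a low-level writer over the object.Put RPC stream:
// the object header goes out as the init part, payload data as chunk parts,
// and the single response is processed when the stream is closed.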
type objectWriterRaw struct {
	client *Client
	stream interface {
		Write(*v2object.PutRequest) error
		Close() error
	}

	key         *ecdsa.PrivateKey
	res         ResObjectPut
	err         error
	chunkCalled bool
	respV2      v2object.PutResponse
	req         v2object.PutRequest
	partInit    v2object.PutObjectPartInit
	partChunk   v2object.PutObjectPartChunk
	maxChunkLen int
}

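// WriteHeader sends the object header as the init part of the stream.
// It reports whether the request was signed and written successfully;
// on failure the error is kept and later returned by Close.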
func (x *objectWriterRaw) WriteHeader(_ context.Context, hdr object.Object) bool {
	v2Hdr := hdr.ToV2()

	x.partInit.SetObjectID(v2Hdr.GetObjectID())
	x.partInit.SetHeader(v2Hdr.GetHeader())
	x.partInit.SetSignature(v2Hdr.GetSignature())

	x.req.GetBody().SetObjectPart(&x.partInit)
	x.req.SetVerificationHeader(nil)

	x.err = signature.SignServiceMessage(x.key, &x.req)
	if x.err != nil {
		x.err = fmt.Errorf("sign message: %w", x.err)
		return false
	}

	x.err = x.stream.Write(&x.req)
	return x.err == nil
}

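// WritePayloadChunk streams the given payload bytes, splitting them into
// messages of at most maxChunkLen bytes each. It reports whether every
// message was signed and written successfully.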
func (x *objectWriterRaw) WritePayloadChunk(_ context.Context, chunk []byte) bool {
	if !x.chunkCalled {
		x.chunkCalled = true
		x.req.GetBody().SetObjectPart(&x.partChunk)
	}

	for ln := len(chunk); ln > 0; ln = len(chunk) {
		if ln > x.maxChunkLen {
			ln = x.maxChunkLen
		}

		// Overflow of the size limit is handled above, but there is another
		// case: what if the method is called many times with a "small" chunk?
		// Currently a message is written to the stream on every call. As an
		// alternative, the data could be buffered: in most cases the chunk
		// length does not vary between calls, so, given that assumption and
		// the payload length from the header, intermediate chunks could be
		// accumulated and a message sent only when the allocated buffer is
		// full or the last chunk arrives. We assume that allocating and
		// filling a buffer beats synchronous sending, but this needs to be
		// measured.
		x.partChunk.SetChunk(chunk[:ln])
		x.req.SetVerificationHeader(nil)

		x.err = signature.SignServiceMessage(x.key, &x.req)
		if x.err != nil {
			x.err = fmt.Errorf("sign message: %w", x.err)
			return false
		}

		x.err = x.stream.Write(&x.req)
		if x.err != nil {
			return false
		}

		chunk = chunk[ln:]
	}

	return true
}

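// Close finishes the stream, checks the response status and, on success,
// extracts the object ID from the response into the result.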
func (x *objectWriterRaw) Close(_ context.Context) (*ResObjectPut, error) {
	// Ignore io.EOF: it is the expected error when the server terminates the
	// stream on its side, e.g. because the stream contained an invalid
	// message. In that case the server reports the error in the response
	// message (in its status).
	if x.err != nil && !errors.Is(x.err, io.EOF) {
		return nil, x.err
	}

	if x.err = x.stream.Close(); x.err != nil {
		return nil, x.err
	}

	x.res.st, x.err = x.client.processResponse(&x.respV2)
	if x.err != nil {
		return nil, x.err
	}

	if !apistatus.IsSuccessful(x.res.st) {
		return &x.res, nil
	}

	const fieldID = "ID"

	idV2 := x.respV2.GetBody().GetObjectID()
	if idV2 == nil {
		return nil, newErrMissingResponseField(fieldID)
	}

	x.err = x.res.obj.ReadFromV2(*idV2)
	if x.err != nil {
		x.err = newErrInvalidResponseField(fieldID, x.err)
	}

	return &x.res, nil
}