azure: add support for hot, cool, or cold access tiers

Srigovind Nayak 2024-10-20 15:27:21 +05:30
parent 5fe6607127
commit bff3341d10
GPG key ID: 3C4A72A34ABD4C43
4 changed files with 68 additions and 7 deletions

@@ -0,0 +1,21 @@
+Enhancement: Add config option to set Microsoft Blob Storage Access Tier
+
+The `azure.access-tier` option can be passed to restic (using `-o`) to
+specify the access tier for Microsoft Blob Storage objects created by restic.
+
+The access tier is passed as-is to Microsoft Blob Storage, so it needs to be
+understood by the API. The allowed values are `Hot`, `Cool`, or `Cold`.
+If unspecified, the default is inferred from the default configured on the
+storage account.
+
+You can mix access tiers in the same container, and the setting isn't
+stored in the restic repository, so be sure to specify it with each
+command that writes to Microsoft Blob Storage.
+
+There is no official `Archive` storage support in restic; use this option at
+your own risk. To restore any data, it is still necessary to manually warm up
+the required data in the `Archive` tier.
+
+https://github.com/restic/restic/issues/4521
+https://github.com/restic/restic/pull/5046

@@ -568,6 +568,10 @@
The number of concurrent connections to the Azure Blob Storage service can be set with the
``-o azure.connections=10`` switch. By default, at most five parallel connections are
established.
+The access tier of the blobs uploaded to the Azure Blob Storage service can be set with the
+``-o azure.access-tier=Cool`` switch. The allowed values are ``Hot``, ``Cool``, or ``Cold``.
+If unspecified, the default is inferred from the default configured on the storage account.
+
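For example, passing ``-o azure.access-tier=Cool`` to ``restic backup`` stores newly
uploaded blobs in the ``Cool`` tier. Since the setting is not stored in the repository,
it must be repeated on every command that writes to the backend.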
Google Cloud Storage
********************

@@ -37,6 +37,8 @@ type Backend struct {
	prefix       string
	listMaxItems int
	layout.Layout
+
+	accessTier blob.AccessTier
}
const saveLargeSize = 256 * 1024 * 1024
@@ -124,17 +126,32 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
		}
	}

+	var accessTier blob.AccessTier
+	// if the access tier is not supported, then we will not set the access tier; during the upload process,
+	// the value will be inferred from the default configured on the storage account.
+	for _, tier := range supportedAccessTiers() {
+		if strings.EqualFold(string(tier), cfg.AccessTier) {
+			accessTier = tier
+			break
+		}
+	}
+
	be := &Backend{
		container:    client,
		cfg:          cfg,
		connections:  cfg.Connections,
		Layout:       layout.NewDefaultLayout(cfg.Prefix, path.Join),
		listMaxItems: defaultListMaxItems,
+		accessTier:   accessTier,
	}

	return be, nil
}

+func supportedAccessTiers() []blob.AccessTier {
+	return []blob.AccessTier{blob.AccessTierHot, blob.AccessTierCool, blob.AccessTierCold, blob.AccessTierArchive}
+}
+
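// Editorial sketch, not part of this commit: resolveTier is a hypothetical
// helper mirroring the loop in open() above. Matching is case-insensitive
// via strings.EqualFold, and any value outside supportedAccessTiers() falls
// through to the zero value, so the storage account default applies:
//
//	resolveTier("cool")    == blob.AccessTierCool
//	resolveTier("ARCHIVE") == blob.AccessTierArchive
//	resolveTier("premium") == "" (unsupported; account default is used)
func resolveTier(input string) blob.AccessTier {
	for _, tier := range supportedAccessTiers() {
		if strings.EqualFold(string(tier), input) {
			return tier
		}
	}
	return ""
}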
// Open opens the Azure backend at the specified container.
func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
	return open(cfg, rt)
@@ -213,25 +230,39 @@ func (be *Backend) Path() string {
	return be.prefix
}

+// useAccessTier determines whether to apply the configured access tier to a given file.
+// For archive access tier, only data files are stored using that class; metadata
+// must remain instantly accessible.
+func (be *Backend) useAccessTier(h backend.Handle) bool {
+	notArchiveClass := !strings.EqualFold(be.cfg.AccessTier, "archive")
+	isDataFile := h.Type == backend.PackFile && !h.IsMetadata
+	return isDataFile || notArchiveClass
+}
+
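// Editorial note, not part of this commit: the rule above in practice.
// Assuming `-o azure.access-tier=Archive` is configured:
//
//	backend.Handle{Type: backend.PackFile}                   -> tier applied (data pack)
//	backend.Handle{Type: backend.PackFile, IsMetadata: true} -> account default (tree pack)
//	backend.Handle{Type: backend.ConfigFile}                 -> account default (metadata)
//
// For any non-archive tier (e.g. Cool), useAccessTier returns true for every
// file type, so the configured tier is applied across the board.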
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
	objName := be.Filename(h)

	debug.Log("InsertObject(%v, %v)", be.cfg.AccountName, objName)

+	var accessTier blob.AccessTier
+	if be.useAccessTier(h) {
+		accessTier = be.accessTier
+	}
+
	var err error
	if rd.Length() < saveLargeSize {
		// if it's smaller than 256 MiB, then just create the file directly from the reader
-		err = be.saveSmall(ctx, objName, rd)
+		err = be.saveSmall(ctx, objName, rd, accessTier)
	} else {
		// otherwise use the more complicated method
-		err = be.saveLarge(ctx, objName, rd)
+		err = be.saveLarge(ctx, objName, rd, accessTier)
	}

	return err
}

-func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader) error {
+func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error {
	blockBlobClient := be.container.NewBlockBlobClient(objName)

	// upload it as a new "block", use the base64 hash for the ID
@@ -252,11 +283,13 @@ func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader) error {
	}

	blocks := []string{id}
-	_, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{})
+	_, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{
+		Tier: &accessTier,
+	})
	return errors.Wrap(err, "CommitBlockList")
}

-func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader) error {
+func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error {
	blockBlobClient := be.container.NewBlockBlobClient(objName)

	buf := make([]byte, 100*1024*1024)
@@ -303,7 +336,9 @@ func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader) error {
		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", uploadedBytes, rd.Length())
	}

-	_, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{})
+	_, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{
+		Tier: &accessTier,
+	})

	debug.Log("uploaded %d parts: %v", len(blocks), blocks)
	return errors.Wrap(err, "CommitBlockList")
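
// Editorial sketch, not part of this commit: a way to check, with the same
// Azure SDK used above, which tier an uploaded blob actually landed in. The
// helper name and its use of debug.Log are hypothetical.
func logAccessTier(ctx context.Context, c *container.Client, objName string) {
	props, err := c.NewBlockBlobClient(objName).GetProperties(ctx, nil)
	if err != nil || props.AccessTier == nil {
		return
	}
	debug.Log("blob %v is stored with access tier %v", objName, *props.AccessTier)
}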

@@ -22,7 +22,8 @@ type Config struct {
	Container string
	Prefix    string

-	Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
+	Connections uint   `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
+	AccessTier  string `option:"access-tier" help:"set the access tier for the blob storage (default: inferred from the storage account defaults)"`
}
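// Editorial sketch, not part of this commit: restic fills these fields from
// `-o` options via the struct tags above, so `-o azure.access-tier=Cool` is
// roughly equivalent to:
//
//	cfg := NewConfig()
//	cfg.AccessTier = "Cool"
//
// An empty AccessTier leaves tier selection to the storage account defaults.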
// NewConfig returns a new Config with the default values filled in.