[#334] Automation of PATCH in S3
Some checks failed
DCO action / DCO (pull_request) Has been cancelled
Signed-off-by: Kirill Sosnovskikh <k.sosnovskikh@yadro.com>
This commit is contained in:
parent ee7d9df4a9
commit f446d4b740

6 changed files with 157 additions and 6 deletions
@@ -46,9 +46,9 @@ class HttpClient:
         logger.info(f"Response: {response.status_code} => {response.text}")

         if expected_status_code:
-            assert response.status_code == expected_status_code, (
-                f"Got {response.status_code} response code" f" while {expected_status_code} expected"
-            )
+            assert (
+                response.status_code == expected_status_code
+            ), f"Got {response.status_code} response code while {expected_status_code} expected"

         return response
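Note for reviewers: the assertion refactor above is behaviour-preserving; only the layout of the check and its message changes. A minimal standalone sketch of what the check produces on a mismatch (the response object here is a stand-in, not the testlib HttpClient response):

class _FakeResponse:
    status_code = 409  # stand-in for an httpx.Response status code

response = _FakeResponse()
expected_status_code = 200

if expected_status_code:
    assert (
        response.status_code == expected_status_code
    ), f"Got {response.status_code} response code while {expected_status_code} expected"
# AssertionError: Got 409 response code while 200 expected
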
@@ -861,7 +861,7 @@ class AwsCliClient(S3ClientWrapper):
         return response["Parts"]

     @reporter.step("Complete multipart upload S3")
-    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
+    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
         if bucket.startswith("-") or " " in bucket:
             bucket = f'"{bucket}"'
@@ -704,7 +704,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
         return response["Parts"]

     @reporter.step("Complete multipart upload S3")
-    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
+    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
         parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
         params = self._convert_to_s3_params(locals(), exclude=["parts"])
         params["MultipartUpload"] = {"Parts": parts}
@@ -336,7 +336,7 @@ class S3ClientWrapper(HumanReadableABC):
         """Lists the parts that have been uploaded for a specific multipart upload."""

     @abstractmethod
-    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
+    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
         """Completes a multipart upload by assembling previously uploaded parts."""

     @abstractmethod
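Note for reviewers: across the three hunks above, the only behavioural change is that complete_multipart_upload now returns the service response instead of discarding it. A minimal sketch of how a test might consume it; the variable names and the asserted key are illustrative, since the exact response keys are not shown in this diff:

# s3_client is any S3ClientWrapper implementation (AwsCliClient or Boto3ClientWrapper);
# parts is a list of (part_number, etag) tuples, as both wrappers above expect.
parts = [(1, etag_part_1), (2, etag_part_2)]
response = s3_client.complete_multipart_upload(bucket, key, upload_id, parts)

# Assumption: the dict mirrors the S3 CompleteMultipartUpload reply, so the
# assembled object's ETag can be checked directly instead of re-fetching the object.
assert response.get("ETag"), "Completed multipart upload should report an ETag"
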
src/frostfs_testlib/s3/s3_http_client.py (new file, 127 lines)
@@ -0,0 +1,127 @@
import hashlib
import logging
import xml.etree.ElementTree as ET

import httpx
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import Credentials

from frostfs_testlib import reporter
from frostfs_testlib.http.http_client import HttpClient
from frostfs_testlib.utils.file_utils import TestFile

logger = logging.getLogger("NeoLogger")

DEFAULT_TIMEOUT = 60.0


class S3HttpClient:
    def __init__(
        self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1"
    ) -> None:
        self.http_client = HttpClient()
        self.s3gate_endpoint = s3gate_endpoint
        self.credentials = Credentials(access_key_id, secret_access_key)
        self.profile = profile
        self.region = region
        self.service = "s3"
        self.signature = SigV4Auth(self.credentials, self.service, self.region)

    def _to_s3_header(self, header: str) -> str:
        replacement_map = {
            "Acl": "ACL",
            "_": "-",
        }

        result = header
        if not header.startswith("x_amz"):
            result = header.title()

        for find, replace in replacement_map.items():
            result = result.replace(find, replace)

        return result

    def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None):
        exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"]
        return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None}

    def _create_aws_request(
        self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None
    ) -> AWSRequest:
        data = b""

        if content is not None:
            if isinstance(content, TestFile):
                with open(content, "rb") as io_content:
                    data = io_content.read()
            elif isinstance(content, str):
                data = bytes(content, encoding="utf-8")
            elif isinstance(content, bytes):
                data = content
            else:
                raise RuntimeError(f"Content expected as a string, bytes or TestFile object, got: {content}")

        headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest()
        aws_request = AWSRequest(method, url, headers, data, params)
        self.signature.add_auth(aws_request)

        return aws_request

    def _exec_request(
        self,
        method: str,
        url: str,
        headers: dict,
        content: str | bytes | TestFile = None,
        params: dict = None,
        timeout: float = DEFAULT_TIMEOUT,
    ) -> dict:
        aws_request = self._create_aws_request(method, url, headers, content, params)
        response = self.http_client.send(
            aws_request.method,
            aws_request.url,
            headers=dict(aws_request.headers),
            data=aws_request.data,
            params=aws_request.params,
            timeout=timeout,
        )

        try:
            response.raise_for_status()
        except httpx.HTTPStatusError:
            raise httpx.HTTPStatusError(response.text, request=response.request, response=response)

        root = ET.fromstring(response.read())
        data = {
            "LastModified": root.find(".//LastModified").text,
            "ETag": root.find(".//ETag").text,
        }

        if response.headers.get("x-amz-version-id"):
            data["VersionId"] = response.headers.get("x-amz-version-id")

        return data

@reporter.step("Patch object S3")
|
||||||
|
def patch_object(
|
||||||
|
self,
|
||||||
|
bucket: str,
|
||||||
|
key: str,
|
||||||
|
content: str | bytes | TestFile,
|
||||||
|
content_range: str,
|
||||||
|
version_id: str = None,
|
||||||
|
if_match: str = None,
|
||||||
|
if_unmodified_since: str = None,
|
||||||
|
x_amz_expected_bucket_owner: str = None,
|
||||||
|
timeout: float = DEFAULT_TIMEOUT,
|
||||||
|
) -> dict:
|
||||||
|
if content_range and not content_range.startswith("bytes"):
|
||||||
|
content_range = f"bytes {content_range}/*"
|
||||||
|
|
||||||
|
url = f"{self.s3gate_endpoint}/{bucket}/{key}"
|
||||||
|
headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"])
|
||||||
|
params = {"VersionId": version_id} if version_id is not None else None
|
||||||
|
|
||||||
|
return self._exec_request("PATCH", url, headers, content, params, timeout=timeout)
|
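
Note for reviewers: a minimal sketch of how the new client might be exercised in a test. The endpoint, credentials, and object data below are placeholders; only the S3HttpClient API itself comes from this diff:

from frostfs_testlib.s3.s3_http_client import S3HttpClient

# Placeholder gateway endpoint and credentials, for illustration only.
s3_http_client = S3HttpClient(
    s3gate_endpoint="http://s3gate.example.com:8080",
    access_key_id="<access_key_id>",
    secret_access_key="<secret_access_key>",
)

# Overwrite the first 16 bytes of an existing object; a bare "start-end" range
# is normalized to "bytes 0-15/*" inside patch_object before signing.
response = s3_http_client.patch_object(
    bucket="test-bucket",
    key="test-object",
    content=b"0123456789abcdef",
    content_range="0-15",
)
assert "ETag" in response and "LastModified" in response
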

@@ -12,6 +12,7 @@ from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.container import search_nodes_with_container
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.utils.file_utils import TestFile, get_file_hash

 logger = logging.getLogger("NeoLogger")
@@ -185,3 +186,26 @@ def search_nodes_with_bucket(
             break
     nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster)
     return nodes_list
+
+
+def get_bytes_relative_to_object(value: int | str, object_size: int = None, part_size: int = None) -> int:
+    if isinstance(value, int):
+        return value
+
+    if "part" not in value and "object" not in value:
+        return int(value)
+
+    if object_size is not None:
+        value = value.replace("object", str(object_size))
+
+    if part_size is not None:
+        value = value.replace("part", str(part_size))
+
+    return int(eval(value))
+
+
+def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | tuple:
+    start, end = rng.split(":")
+    start = get_bytes_relative_to_object(start, object_size, part_size)
+    end = get_bytes_relative_to_object(end, object_size, part_size)
+    return (start, end) if int_values else f"bytes {start}-{end}/*"
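
Note for reviewers: a short illustration of how the new range helpers resolve the symbolic "object"/"part" placeholders. The values are arbitrary, and the commented import assumes the helper module touched by this hunk, whose path is not shown here:

# from frostfs_testlib.steps.s3.s3_helper import (  # assumed module path
#     get_bytes_relative_to_object,
#     get_range_relative_to_object,
# )

get_bytes_relative_to_object(512)                             # 512 (ints pass through)
get_bytes_relative_to_object("object-1", object_size=1024)    # 1023
get_bytes_relative_to_object("part+10", part_size=5 * 1024)   # 5130

# "start:end" ranges become Content-Range style strings by default...
get_range_relative_to_object("0:object-1", object_size=1024)  # "bytes 0-1023/*"
# ...or an (int, int) tuple when int_values=True.
get_range_relative_to_object("part:2*part-1", part_size=100, int_values=True)  # (100, 199)
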