From 889d10ad26de4084dea16416f597a3ac88abd043 Mon Sep 17 00:00:00 2001
From: Yuval Lifshitz
Date: Thu, 8 Aug 2024 18:00:01 +0000
Subject: [PATCH 1/6] rgw/logging: basic set of tests for bucket logging
Signed-off-by: Yuval Lifshitz
---
README.rst | 14 +
pytest.ini | 1 +
s3tests_boto3/functional/test_s3.py | 757 +++++++++++++++++++++++++++-
3 files changed, 754 insertions(+), 18 deletions(-)
diff --git a/README.rst b/README.rst
index e34c7a11..283b7e87 100644
--- a/README.rst
+++ b/README.rst
@@ -100,3 +100,17 @@ You can filter tests based on their attributes::
S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_iam.py -m 'not fails_on_rgw'
+========================
+ Bucket logging tests
+========================
+
+Ceph has extensions for the bucket logging S3 API. For the tests to cover these extensions, the file `examples/rgw/boto3/service-2.sdk-extras.json` from the Ceph repository
+should be copied to the `~/.aws/models/s3/2006-03-01/` directory on the machine where the tests are run.
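+For example, assuming the tests are run from a checkout of the Ceph repository::
+
+ mkdir -p ~/.aws/models/s3/2006-03-01/
+ cp examples/rgw/boto3/service-2.sdk-extras.json ~/.aws/models/s3/2006-03-01/
+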
+If the file is not present, the tests will still run, but the extension tests will be skipped. In this case, the bucket logging object roll time must be decreased manually from its default of
+300 seconds to 5 seconds::
+
+ vstart.sh -o rgw_bucket_log_object_roll_time=5
+
+Then the tests can be run with::
+
+ S3TEST_CONF=your.conf tox -- -m 'bucket_logging'
diff --git a/pytest.ini b/pytest.ini
index 1a7d9a83..4aafd658 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -7,6 +7,7 @@ markers =
auth_common
bucket_policy
bucket_encryption
+ bucket_logging
checksum
cloud_transition
encryption
diff --git a/s3tests_boto3/functional/test_s3.py b/s3tests_boto3/functional/test_s3.py
index 85dfba16..83073670 100644
--- a/s3tests_boto3/functional/test_s3.py
+++ b/s3tests_boto3/functional/test_s3.py
@@ -4929,24 +4929,6 @@ def test_bucket_acl_revoke_all():
policy['Grants'] = old_grants
client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
-# TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
-# http://tracker.newdream.net/issues/984
-@pytest.mark.fails_on_rgw
-def test_logging_toggle():
- bucket_name = get_new_bucket()
- client = get_client()
-
- main_display_name = get_main_display_name()
- main_user_id = get_main_user_id()
-
- status = {'LoggingEnabled': {'TargetBucket': bucket_name, 'TargetGrants': [{'Grantee': {'DisplayName': main_display_name, 'ID': main_user_id,'Type': 'CanonicalUser'},'Permission': 'FULL_CONTROL'}], 'TargetPrefix': 'foologgingprefix'}}
-
- client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
- client.get_bucket_logging(Bucket=bucket_name)
- status = {'LoggingEnabled': {}}
- client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
- # NOTE: this does not actually test whether or not logging works
-
def _setup_access(bucket_acl, object_acl):
"""
Simple test fixture: create a bucket with given ACL, with objects:
@@ -13931,3 +13913,742 @@ def test_post_object_upload_checksum():
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
assert r.status_code == 400
+
+
+def _has_bucket_logging_extension():
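+ """Return True if the boto3 client model includes the Ceph bucket logging
+ extensions (loaded from service-2.sdk-extras.json, see README). Passing the
+ extension-only 'LoggingType' field raises a client-side ParamValidationError
+ when the model lacks it."""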
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/', 'LoggingType': 'Journal'}
+ try:
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ except ParamValidationError:
+ return False
+ return True
+
+
+@pytest.mark.bucket_logging
+def test_put_bucket_logging():
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ has_extensions = _has_bucket_logging_extension()
+
+ # minimal configuration
+ logging_enabled = {
+ 'TargetBucket': log_bucket_name,
+ 'TargetPrefix': 'log/'
+ }
+
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ response = client.get_bucket_logging(Bucket=src_bucket_name)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ # the default target object key format is returned
+ logging_enabled['TargetObjectKeyFormat'] = {'SimplePrefix': {}}
+ if has_extensions:
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
+ assert response['LoggingEnabled'] == logging_enabled
+
+ # with simple target object prefix
+ logging_enabled = {
+ 'TargetBucket': log_bucket_name,
+ 'TargetPrefix': 'log/',
+ 'TargetObjectKeyFormat': {
+ 'SimplePrefix': {}
+ }
+ }
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ response = client.get_bucket_logging(Bucket=src_bucket_name)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ if has_extensions:
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
+ assert response['LoggingEnabled'] == logging_enabled
+
+ # with partitioned target object prefix
+ logging_enabled = {
+ 'TargetBucket': log_bucket_name,
+ 'TargetPrefix': 'log/',
+ 'TargetObjectKeyFormat': {
+ 'PartitionedPrefix': {
+ 'PartitionDateSource': 'DeliveryTime'
+ }
+ }
+ }
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ response = client.get_bucket_logging(Bucket=src_bucket_name)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ if has_extensions:
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
+ assert response['LoggingEnabled'] == logging_enabled
+
+ # with target grant (not implemented in RGW)
+ main_display_name = get_main_display_name()
+ main_user_id = get_main_user_id()
+ logging_enabled = {
+ 'TargetBucket': log_bucket_name,
+ 'TargetPrefix': 'log/',
+ 'TargetGrants': [{'Grantee': {'DisplayName': main_display_name, 'ID': main_user_id,'Type': 'CanonicalUser'},'Permission': 'FULL_CONTROL'}]
+ }
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ response = client.get_bucket_logging(Bucket=src_bucket_name)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ if has_extensions:
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
+ # target grants are not implemented
+ logging_enabled.pop('TargetGrants')
+ # the default target object key format is returned
+ logging_enabled['TargetObjectKeyFormat'] = {'SimplePrefix': {}}
+ assert response['LoggingEnabled'] == logging_enabled
+
+
+@pytest.mark.bucket_logging
+def test_put_bucket_logging_errors():
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name1 = get_new_bucket_name()
+ log_bucket1 = get_new_bucket_resource(name=log_bucket_name1)
+ client = get_client()
+
+ # invalid source bucket
+ try:
+ response = client.put_bucket_logging(Bucket=src_bucket_name+'kaboom', BucketLoggingStatus={
+ 'LoggingEnabled': {'TargetBucket': log_bucket_name1, 'TargetPrefix': 'log/'},
+ })
+ assert False, 'expected failure'
+ except ClientError as e:
+ assert e.response['Error']['Code'] == 'NoSuchBucket'
+
+ # invalid log bucket
+ try:
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': {'TargetBucket': log_bucket_name1+'kaboom', 'TargetPrefix': 'log/'},
+ })
+ assert False, 'expected failure'
+ except ClientError as e:
+ assert e.response['Error']['Code'] == 'NoSuchKey'
+
+ # log bucket has bucket logging
+ log_bucket_name2 = get_new_bucket_name()
+ log_bucket2 = get_new_bucket_resource(name=log_bucket_name2)
+ response = client.put_bucket_logging(Bucket=log_bucket_name2, BucketLoggingStatus={
+ 'LoggingEnabled': {'TargetBucket': log_bucket_name1, 'TargetPrefix': 'log/'},
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ try:
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': {'TargetBucket': log_bucket_name2, 'TargetPrefix': 'log/'},
+ })
+ assert False, 'expected failure'
+ except ClientError as e:
+ assert e.response['Error']['Code'] == 'InvalidArgument'
+
+ # invalid partition prefix
+ logging_enabled = {
+ 'TargetBucket': log_bucket_name1,
+ 'TargetPrefix': 'log/',
+ 'TargetObjectKeyFormat': {
+ 'PartitionedPrefix': {
+ 'PartitionDateSource': 'kaboom'
+ }
+ }
+ }
+ try:
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert False, 'expected failure'
+ except ClientError as e:
+ assert e.response['Error']['Code'] == 'MalformedXML'
+
+ # TODO: log bucket is encrypted
+ #_put_bucket_encryption_s3(client, log_bucket_name)
+ #try:
+ # response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ # 'LoggingEnabled': {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'},
+ # })
+ # assert False, 'expected failure'
+ #except ClientError as e:
+ # assert e.response['Error']['Code'] == 'InvalidArgument'
+
+ if _has_bucket_logging_extension():
+ try:
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': {'TargetBucket': log_bucket_name1, 'TargetPrefix': 'log/', 'LoggingType': 'kaboom'},
+ })
+ assert False, 'expected failure'
+ except ClientError as e:
+ assert e.response['Error']['Code'] == 'MalformedXML'
+
+
+@pytest.mark.bucket_logging
+def test_rm_bucket_logging():
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={})
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+ response = client.get_bucket_logging(Bucket=src_bucket_name)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ assert 'LoggingEnabled' not in response
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.fails_on_aws
+def test_put_bucket_logging_extensions():
+ if not _has_bucket_logging_extension():
+ pytest.skip('ceph extension to bucket logging not supported at client')
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ logging_enabled = {'TargetBucket': log_bucket_name,
+ 'TargetPrefix': 'log/',
+ 'LoggingType': 'Standard',
+ 'ObjectRollTime': 5,
+ 'RecordsBatchSize': 0
+ }
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+ response = client.get_bucket_logging(Bucket=src_bucket_name)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+ logging_enabled['TargetObjectKeyFormat'] = {'SimplePrefix': {}}
+ assert response['LoggingEnabled'] == logging_enabled
+
+
+def _verify_records(records, bucket_name, event_type, src_keys):
+ keys_found = []
+ for record in iter(records.splitlines()):
+ print('bucket log record:', record)
+ if bucket_name in record and event_type in record:
+ for key in src_keys:
+ if key in record:
+ keys_found.append(key)
+ break
+ print('keys found in bucket log:', keys_found)
+ print('keys from the source bucket:', src_keys)
+ return len(keys_found) == len(src_keys)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_put_objects():
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ has_extensions = _has_bucket_logging_extension()
+
+ # minimal configuration
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['LoggingType'] = 'Journal'
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+ num_keys = 5
+ for j in range(num_keys):
+ name = 'myobject'+str(j)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+
+ time.sleep(5)
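+ # the roll time has elapsed, so this extra request should trigger commit of the pending log object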
+ client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ for key in keys:
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_delete_objects():
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ has_extensions = _has_bucket_logging_extension()
+
+ num_keys = 5
+ for j in range(num_keys):
+ name = 'myobject'+str(j)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+
+ # minimal configuration
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['LoggingType'] = 'Journal'
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+ for key in src_keys:
+ client.delete_object(Bucket=src_bucket_name, Key=key)
+
+ time.sleep(5)
+ client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ key = keys[0]
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ assert _verify_records(body, src_bucket_name, 'REST.DELETE.delete_obj', src_keys)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_get_objects():
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ has_extensions = _has_bucket_logging_extension()
+
+ num_keys = 5
+ for j in range(num_keys):
+ name = 'myobject'+str(j)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+
+ # minimal configuration
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['LoggingType'] = 'Standard'
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+ for key in src_keys:
+ client.get_object(Bucket=src_bucket_name, Key=key)
+
+ time.sleep(5)
+ client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ key = keys[0]
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ assert _verify_records(body, src_bucket_name, 'REST.GET.get_obj', src_keys)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_copy_objects():
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ has_extensions = _has_bucket_logging_extension()
+
+ num_keys = 5
+ for j in range(num_keys):
+ name = 'myobject'+str(j)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+
+ # minimal configuration
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['LoggingType'] = 'Journal'
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+ for key in src_keys:
+ client.copy_object(Bucket=src_bucket_name, Key='copy_of_'+key, CopySource={'Bucket': src_bucket_name, 'Key': key})
+
+ time.sleep(5)
+ client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ key = keys[0]
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.copy_obj', src_keys)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_head_objects():
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ has_extensions = _has_bucket_logging_extension()
+
+ num_keys = 5
+ for j in range(num_keys):
+ name = 'myobject'+str(j)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['LoggingType'] = 'Standard'
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+ for key in src_keys:
+ client.head_object(Bucket=src_bucket_name, Key=key)
+
+ time.sleep(5)
+ client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ key = keys[0]
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ assert _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_mpu():
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ has_extensions = _has_bucket_logging_extension()
+
+ # minimal configuration
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['LoggingType'] = 'Journal'
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+ src_key = "myobject"
+ objlen = 30 * 1024 * 1024
+ (upload_id, data, parts) = _multipart_upload(bucket_name=src_bucket_name, key=src_key, size=objlen)
+ client.complete_multipart_upload(Bucket=src_bucket_name, Key=src_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+ time.sleep(5)
+ client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ key = keys[0]
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ assert _verify_records(body, src_bucket_name, 'REST.POST.complete_multipart', [src_key])
+
+
+def _bucket_logging_type(logging_type):
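+ """Verify which operations are recorded for the given logging type: 'Journal'
+ should record object modifications (PUT) but not reads (HEAD), while
+ 'Standard' should record both."""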
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ logging_enabled = {
+ 'TargetBucket': log_bucket_name,
+ 'TargetPrefix': 'log/',
+ 'ObjectRollTime': 5,
+ 'LoggingType': logging_type
+ }
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ num_keys = 5
+ for j in range(num_keys):
+ name = 'myobject'+str(j)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ client.head_object(Bucket=src_bucket_name, Key=name)
+
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+
+ time.sleep(5)
+ client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+ client.head_object(Bucket=src_bucket_name, Key='dummy')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ key = keys[0]
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ if logging_type == 'Journal':
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+ assert not _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys)
+ elif logging_type == 'Standard':
+ assert _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+ else:
+ assert False, 'invalid logging type: '+logging_type
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.fails_on_aws
+def test_bucket_logging_event_type_j():
+ if not _has_bucket_logging_extension():
+ pytest.skip('ceph extension to bucket logging not supported at client')
+ _bucket_logging_type('Journal')
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.fails_on_aws
+def test_bucket_logging_event_type_s():
+ if not _has_bucket_logging_extension():
+ pytest.skip('ceph extension to bucket logging not supported at client')
+ _bucket_logging_type('Standard')
+
+
+@pytest.mark.bucket_logging
+@pytest.mark.fails_on_aws
+def test_bucket_logging_roll_time():
+ if not _has_bucket_logging_extension():
+ pytest.skip('ceph extension to bucket logging not supported at client')
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+
+ roll_time = 10
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/', 'ObjectRollTime': roll_time}
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+ num_keys = 5
+ for j in range(num_keys):
+ name = 'myobject'+str(j)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+
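+ # wait only half of the roll time: the pending log object should not be committed yet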
+ time.sleep(roll_time/2)
+ client.put_object(Bucket=src_bucket_name, Key='myobject', Body='myobject')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 0
+
+ time.sleep(roll_time/2)
+ client.put_object(Bucket=src_bucket_name, Key='myobject', Body='myobject')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ key = keys[0]
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+ client.delete_object(Bucket=log_bucket_name, Key=key)
+
+ num_keys = 25
+ for j in range(num_keys):
+ name = 'myobject'+str(j)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ time.sleep(1)
+
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+
+ time.sleep(roll_time)
+ client.put_object(Bucket=src_bucket_name, Key='myobject', Body='myobject')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) > 1
+
+ body = ''
+ for key in keys:
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body += _get_body(response)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_multiple_prefixes():
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ has_extensions = _has_bucket_logging_extension()
+
+ num_buckets = 5
+ buckets = []
+ bucket_name_prefix = get_new_bucket_name()
+ for j in range(num_buckets):
+ src_bucket_name = bucket_name_prefix+str(j)
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': src_bucket_name+'/'}
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ buckets.append(src_bucket_name)
+
+ num_keys = 5
+ for src_bucket_name in buckets:
+ for j in range(num_keys):
+ name = 'myobject'+str(j)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+
+ time.sleep(5)
+ for src_bucket_name in buckets:
+ client.head_object(Bucket=src_bucket_name, Key='myobject0')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) >= num_buckets
+
+ for key in keys:
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ found = False
+ for src_bucket_name in buckets:
+ if key.startswith(src_bucket_name):
+ found = True
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+ assert found
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_single_prefix():
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ has_extensions = _has_bucket_logging_extension()
+
+ num_buckets = 5
+ buckets = []
+ bucket_name_prefix = get_new_bucket_name()
+ for j in range(num_buckets):
+ src_bucket_name = bucket_name_prefix+str(j)
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ # minimal configuration
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ buckets.append(src_bucket_name)
+
+ num_keys = 5
+ bucket_ind = 0
+ for src_bucket_name in buckets:
+ bucket_ind += 1
+ for j in range(num_keys):
+ name = 'myobject'+str(bucket_ind)+str(j)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+
+ time.sleep(5)
+ client.put_object(Bucket=buckets[0], Key='dummy', Body='dummy')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ key = keys[0]
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ found = False
+ for src_bucket_name in buckets:
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+ found = _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+ assert found
From 7645dab21d5f590b76818e9d4fc32e5b869bc949 Mon Sep 17 00:00:00 2001
From: Yuval Lifshitz
Date: Wed, 23 Oct 2024 10:25:23 +0000
Subject: [PATCH 2/6] rgw/logging: add versioned bucket tests
as well as multi-delete tests
Signed-off-by: Yuval Lifshitz
---
s3tests_boto3/functional/test_s3.py | 358 ++++++++++++++++++++++++----
1 file changed, 306 insertions(+), 52 deletions(-)
diff --git a/s3tests_boto3/functional/test_s3.py b/s3tests_boto3/functional/test_s3.py
index 83073670..cc352b55 100644
--- a/s3tests_boto3/functional/test_s3.py
+++ b/s3tests_boto3/functional/test_s3.py
@@ -13931,6 +13931,66 @@ def _has_bucket_logging_extension():
return True
+import shlex
+
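+# a 'Standard' record follows the AWS S3 server access log format; an illustrative
+# example (not captured from a real run), with 26 space-separated fields:
+# <owner> mybucket [06/Feb/2019:00:00:38 +0000] 192.0.2.3 <requester> <request-id> REST.PUT.OBJECT mykey "PUT /mybucket/mykey HTTP/1.1" 200 - 512 512 30 10 "-" "aws-cli" - <host-id> SigV4 ECDHE-RSA-AES128-GCM-SHA256 AuthHeader mybucket.example.com TLSv1.2 - -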
+def _parse_standard_log_record(record):
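+ # quote the bracketed timestamp so shlex.split() keeps it as a single token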
+ record = record.replace('[', '"').replace(']', '"')
+ chunks = shlex.split(record)
+ assert len(chunks) == 26
+ return json.dumps({
+ 'BucketOwner': chunks[0],
+ 'BucketName': chunks[1],
+ 'RequestDateTime': chunks[2],
+ 'RemoteIP': chunks[3],
+ 'Requester': chunks[4],
+ 'RequestID': chunks[5],
+ 'Operation': chunks[6],
+ 'Key': chunks[7],
+ 'RequestURI': chunks[8],
+ 'HTTPStatus': chunks[9],
+ 'ErrorCode': chunks[10],
+ 'BytesSent': chunks[11],
+ 'ObjectSize': chunks[12],
+ 'TotalTime': chunks[13],
+ 'TurnAroundTime': chunks[14],
+ 'Referrer': chunks[15],
+ 'UserAgent': chunks[16],
+ 'VersionID': chunks[17],
+ 'HostID': chunks[18],
+ 'SigVersion': chunks[19],
+ 'CipherSuite': chunks[20],
+ 'AuthType': chunks[21],
+ 'HostHeader': chunks[22],
+ 'TLSVersion': chunks[23],
+ 'AccessPointARN': chunks[24],
+ 'ACLRequired': chunks[25],
+ }, indent=4)
+
+
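+# a 'Journal' record is a shorter, Ceph-specific format; an illustrative example
+# (not captured from a real run), with 8 space-separated fields:
+# <owner> mybucket [06/Feb/2019:00:00:38 +0000] REST.PUT.OBJECT mykey 512 <version-id> <etag>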
+def _parse_journal_log_record(record):
+ record = record.replace('[', '"').replace(']', '"')
+ chunks = shlex.split(record)
+ assert len(chunks) == 8
+ return json.dumps({
+ 'BucketOwner': chunks[0],
+ 'BucketName': chunks[1],
+ 'RequestDateTime': chunks[2],
+ 'Operation': chunks[3],
+ 'Key': chunks[4],
+ 'ObjectSize': chunks[5],
+ 'VersionID': chunks[6],
+ 'ETAG': chunks[7],
+ }, indent=4)
+
+def _parse_log_record(record, record_type):
+ if record_type == 'Standard':
+ return _parse_standard_log_record(record)
+ elif record_type == 'Journal':
+ return _parse_journal_log_record(record)
+ else:
+ assert False, 'unknown log record type'
+
+
@pytest.mark.bucket_logging
def test_put_bucket_logging():
src_bucket_name = get_new_bucket_name()
@@ -14159,24 +14219,33 @@ def test_put_bucket_logging_extensions():
assert response['LoggingEnabled'] == logging_enabled
-def _verify_records(records, bucket_name, event_type, src_keys):
+import logging
+
+logger = logging.getLogger(__name__)
+
+def _verify_records(records, bucket_name, event_type, src_keys, record_type, expected_count):
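+ """Return True if the records contain exactly expected_count entries of
+ event_type for bucket_name whose object key is one of src_keys."""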
keys_found = []
for record in iter(records.splitlines()):
- print('bucket log record:', record)
+ logger.info('bucket log record: %s', _parse_log_record(record, record_type))
if bucket_name in record and event_type in record:
for key in src_keys:
if key in record:
keys_found.append(key)
break
- print('keys found in bucket log:', keys_found)
- print('keys from the source bucket:', src_keys)
- return len(keys_found) == len(src_keys)
+ logger.info('keys found in bucket log: %s', str(keys_found))
+ logger.info('keys from the source bucket: %s', str(src_keys))
+ return len(keys_found) == expected_count
+def randcontent():
+ letters = string.ascii_lowercase
+ length = random.randint(10, 1024)
+ return ''.join(random.choice(letters) for _ in range(length))
-@pytest.mark.bucket_logging
-def test_bucket_logging_put_objects():
- src_bucket_name = get_new_bucket_name()
- src_bucket = get_new_bucket_resource(name=src_bucket_name)
+
+def _bucket_logging_put_objects(versioned):
+ src_bucket_name = get_new_bucket()
+ if versioned:
+ check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
@@ -14195,7 +14264,14 @@ def test_bucket_logging_put_objects():
num_keys = 5
for j in range(num_keys):
name = 'myobject'+str(j)
- client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+ if versioned:
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+
+ if versioned:
+ expected_count = 2*num_keys
+ else:
+ expected_count = num_keys
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
@@ -14206,18 +14282,30 @@ def test_bucket_logging_put_objects():
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 1
+
+ record_type = 'Journal' if has_extensions else 'Standard'
for key in keys:
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, record_type, expected_count)
@pytest.mark.bucket_logging
-def test_bucket_logging_delete_objects():
- src_bucket_name = get_new_bucket_name()
- src_bucket = get_new_bucket_resource(name=src_bucket_name)
+def test_bucket_logging_put_objects():
+ _bucket_logging_put_objects(False)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_put_objects_versioned():
+ _bucket_logging_put_objects(True)
+
+
+def _bucket_logging_delete_objects(versioned):
+ src_bucket_name = get_new_bucket()
+ if versioned:
+ check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
@@ -14226,7 +14314,9 @@ def test_bucket_logging_delete_objects():
num_keys = 5
for j in range(num_keys):
name = 'myobject'+str(j)
- client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+ if versioned:
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
@@ -14241,6 +14331,9 @@ def test_bucket_logging_delete_objects():
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
for key in src_keys:
+ if versioned:
+ response = client.head_object(Bucket=src_bucket_name, Key=key)
+ client.delete_object(Bucket=src_bucket_name, Key=key, VersionId=response['VersionId'])
client.delete_object(Bucket=src_bucket_name, Key=key)
time.sleep(5)
@@ -14250,17 +14343,34 @@ def test_bucket_logging_delete_objects():
keys = _get_keys(response)
assert len(keys) == 1
+ if versioned:
+ expected_count = 2*num_keys
+ else:
+ expected_count = num_keys
+
key = keys[0]
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.DELETE.delete_obj', src_keys)
+ record_type = 'Journal' if has_extensions else 'Standard'
+ assert _verify_records(body, src_bucket_name, 'REST.DELETE.delete_obj', src_keys, record_type, expected_count)
@pytest.mark.bucket_logging
-def test_bucket_logging_get_objects():
- src_bucket_name = get_new_bucket_name()
- src_bucket = get_new_bucket_resource(name=src_bucket_name)
+def test_bucket_logging_delete_objects():
+ _bucket_logging_delete_objects(False)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_delete_objects_versioned():
+ _bucket_logging_delete_objects(True)
+
+
+def _bucket_logging_get_objects(versioned):
+ src_bucket_name = get_new_bucket()
+ if versioned:
+ check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
@@ -14269,7 +14379,9 @@ def test_bucket_logging_get_objects():
num_keys = 5
for j in range(num_keys):
name = 'myobject'+str(j)
- client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+ if versioned:
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
@@ -14284,6 +14396,9 @@ def test_bucket_logging_get_objects():
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
for key in src_keys:
+ if versioned:
+ response = client.head_object(Bucket=src_bucket_name, Key=key)
+ client.get_object(Bucket=src_bucket_name, Key=key, VersionId=response['VersionId'])
client.get_object(Bucket=src_bucket_name, Key=key)
time.sleep(5)
@@ -14293,17 +14408,33 @@ def test_bucket_logging_get_objects():
keys = _get_keys(response)
assert len(keys) == 1
+ if versioned:
+ expected_count = 2*num_keys
+ else:
+ expected_count = num_keys
+
key = keys[0]
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.GET.get_obj', src_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.GET.get_obj', src_keys, 'Standard', expected_count)
@pytest.mark.bucket_logging
-def test_bucket_logging_copy_objects():
- src_bucket_name = get_new_bucket_name()
- src_bucket = get_new_bucket_resource(name=src_bucket_name)
+def test_bucket_logging_get_objects():
+ _bucket_logging_get_objects(False)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_get_objects_versioned():
+ _bucket_logging_get_objects(True)
+
+
+def _bucket_logging_copy_objects(versioned):
+ src_bucket_name = get_new_bucket()
+ if versioned:
+ check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
@@ -14312,7 +14443,9 @@ def test_bucket_logging_copy_objects():
num_keys = 5
for j in range(num_keys):
name = 'myobject'+str(j)
- client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+ if versioned:
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
@@ -14340,13 +14473,25 @@ def test_bucket_logging_copy_objects():
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.copy_obj', src_keys)
+ record_type = 'Journal' if has_extensions else 'Standard'
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.copy_obj', src_keys, record_type, num_keys)
@pytest.mark.bucket_logging
-def test_bucket_logging_head_objects():
- src_bucket_name = get_new_bucket_name()
- src_bucket = get_new_bucket_resource(name=src_bucket_name)
+def test_bucket_logging_copy_objects():
+ _bucket_logging_copy_objects(False)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_copy_objects_versioned():
+ _bucket_logging_copy_objects(True)
+
+
+def _bucket_logging_head_objects(versioned):
+ src_bucket_name = get_new_bucket()
+ if versioned:
+ check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
@@ -14355,7 +14500,7 @@ def test_bucket_logging_head_objects():
num_keys = 5
for j in range(num_keys):
name = 'myobject'+str(j)
- client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if has_extensions:
@@ -14369,7 +14514,11 @@ def test_bucket_logging_head_objects():
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
for key in src_keys:
- client.head_object(Bucket=src_bucket_name, Key=key)
+ if versioned:
+ response = client.head_object(Bucket=src_bucket_name, Key=key)
+ client.head_object(Bucket=src_bucket_name, Key=key, VersionId=response['VersionId'])
+ else:
+ client.head_object(Bucket=src_bucket_name, Key=key)
time.sleep(5)
client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
@@ -14378,17 +14527,33 @@ def test_bucket_logging_head_objects():
keys = _get_keys(response)
assert len(keys) == 1
+ if versioned:
+ expected_count = 2*num_keys
+ else:
+ expected_count = num_keys
+
key = keys[0]
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys, 'Standard', expected_count)
@pytest.mark.bucket_logging
-def test_bucket_logging_mpu():
- src_bucket_name = get_new_bucket_name()
- src_bucket = get_new_bucket_resource(name=src_bucket_name)
+def test_bucket_logging_head_objects():
+ _bucket_logging_head_objects(False)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_head_objects_versioned():
+ _bucket_logging_head_objects(True)
+
+
+def _bucket_logging_mpu(versioned):
+ src_bucket_name = get_new_bucket()
+ if versioned:
+ check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
@@ -14408,6 +14573,9 @@ def test_bucket_logging_mpu():
objlen = 30 * 1024 * 1024
(upload_id, data, parts) = _multipart_upload(bucket_name=src_bucket_name, key=src_key, size=objlen)
client.complete_multipart_upload(Bucket=src_bucket_name, Key=src_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+ if versioned:
+ (upload_id, data, parts) = _multipart_upload(bucket_name=src_bucket_name, key=src_key, size=objlen)
+ client.complete_multipart_upload(Bucket=src_bucket_name, Key=src_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
time.sleep(5)
client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
@@ -14416,11 +14584,97 @@ def test_bucket_logging_mpu():
keys = _get_keys(response)
assert len(keys) == 1
+ if versioned:
+ expected_count = 2
+ else:
+ expected_count = 1
+
key = keys[0]
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.POST.complete_multipart', [src_key])
+ record_type = 'Journal' if has_extensions else 'Standard'
+ assert _verify_records(body, src_bucket_name, 'REST.POST.complete_multipart', [src_key], record_type, expected_count)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_mpu():
+ _bucket_logging_mpu(False)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_mpu_versioned():
+ _bucket_logging_mpu(True)
+
+
+def _bucket_logging_multi_delete(versioned):
+ src_bucket_name = get_new_bucket()
+ if versioned:
+ check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ has_extensions = _has_bucket_logging_extension()
+
+ num_keys = 5
+ for j in range(num_keys):
+ name = 'myobject'+str(j)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+ if versioned:
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+
+ # minimal configuration
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['LoggingType'] = 'Journal'
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+ if versioned:
+ response = client.list_object_versions(Bucket=src_bucket_name)
+ objs_list = []
+ for version in response['Versions']:
+ obj_dict = {'Key': version['Key'], 'VersionId': version['VersionId']}
+ objs_list.append(obj_dict)
+ objs_dict = {'Objects': objs_list}
+ client.delete_objects(Bucket=src_bucket_name, Delete=objs_dict)
+ else:
+ objs_dict = _make_objs_dict(key_names=src_keys)
+ client.delete_objects(Bucket=src_bucket_name, Delete=objs_dict)
+
+ time.sleep(5)
+ client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ if versioned:
+ expected_count = 2*num_keys
+ else:
+ expected_count = num_keys
+
+ key = keys[0]
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ record_type = 'Journal' if has_extensions else 'Standard'
+ assert _verify_records(body, src_bucket_name, 'REST.POST.multi_object_delete', src_keys, record_type, expected_count)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_multi_delete():
+ _bucket_logging_multi_delete(False)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_multi_delete_versioned():
+ _bucket_logging_multi_delete(True)
def _bucket_logging_type(logging_type):
@@ -14441,7 +14695,7 @@ def _bucket_logging_type(logging_type):
num_keys = 5
for j in range(num_keys):
name = 'myobject'+str(j)
- client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
client.head_object(Bucket=src_bucket_name, Key=name)
response = client.list_objects_v2(Bucket=src_bucket_name)
@@ -14460,11 +14714,11 @@ def _bucket_logging_type(logging_type):
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
if logging_type == 'Journal':
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
- assert not _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Journal', num_keys)
+ assert not _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys, 'Journal', num_keys)
elif logging_type == 'Standard':
- assert _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys, 'Standard', num_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Standard', num_keys)
else:
assert False, 'invalid logging type: '+logging_type
@@ -14506,20 +14760,20 @@ def test_bucket_logging_roll_time():
num_keys = 5
for j in range(num_keys):
name = 'myobject'+str(j)
- client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
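# wait only half of the roll time: the pending log object should not be committed yet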
time.sleep(roll_time/2)
- client.put_object(Bucket=src_bucket_name, Key='myobject', Body='myobject')
+ client.put_object(Bucket=src_bucket_name, Key='myobject', Body=randcontent())
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 0
time.sleep(roll_time/2)
- client.put_object(Bucket=src_bucket_name, Key='myobject', Body='myobject')
+ client.put_object(Bucket=src_bucket_name, Key='myobject', Body=randcontent())
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
@@ -14529,20 +14783,20 @@ def test_bucket_logging_roll_time():
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Standard', num_keys)
client.delete_object(Bucket=log_bucket_name, Key=key)
num_keys = 25
for j in range(num_keys):
name = 'myobject'+str(j)
- client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
time.sleep(1)
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
time.sleep(roll_time)
- client.put_object(Bucket=src_bucket_name, Key='myobject', Body='myobject')
+ client.put_object(Bucket=src_bucket_name, Key='myobject', Body=randcontent())
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
@@ -14553,7 +14807,7 @@ def test_bucket_logging_roll_time():
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body += _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Standard', num_keys+1)
@pytest.mark.bucket_logging
@@ -14582,7 +14836,7 @@ def test_bucket_logging_multiple_prefixes():
for src_bucket_name in buckets:
for j in range(num_keys):
name = 'myobject'+str(j)
- client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
time.sleep(5)
for src_bucket_name in buckets:
@@ -14601,7 +14855,7 @@ def test_bucket_logging_multiple_prefixes():
found = True
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Standard', num_keys)
assert found
@@ -14634,7 +14888,7 @@ def test_bucket_logging_single_prefix():
bucket_ind += 1
for j in range(num_keys):
name = 'myobject'+str(bucket_ind)+str(j)
- client.put_object(Bucket=src_bucket_name, Key=name, Body=name)
+ client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
time.sleep(5)
client.put_object(Bucket=buckets[0], Key='dummy', Body='dummy')
@@ -14650,5 +14904,5 @@ def test_bucket_logging_single_prefix():
for src_bucket_name in buckets:
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
- found = _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys)
+ found = _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Standard', num_keys)
assert found
From 7272b7edc7b09f7b35174027725f2313f4699b3c Mon Sep 17 00:00:00 2001
From: Yuval Lifshitz
Date: Wed, 23 Oct 2024 12:01:49 +0000
Subject: [PATCH 3/6] rgw/logging: add concurrency test for init/commit race
conditions
Signed-off-by: Yuval Lifshitz
---
s3tests_boto3/functional/test_s3.py | 54 +++++++++++++++++++++++++++++
1 file changed, 54 insertions(+)
diff --git a/s3tests_boto3/functional/test_s3.py b/s3tests_boto3/functional/test_s3.py
index cc352b55..a0e91872 100644
--- a/s3tests_boto3/functional/test_s3.py
+++ b/s3tests_boto3/functional/test_s3.py
@@ -14302,6 +14302,60 @@ def test_bucket_logging_put_objects_versioned():
_bucket_logging_put_objects(True)
+@pytest.mark.bucket_logging
+def test_bucket_logging_put_concurrency():
+ src_bucket_name = get_new_bucket()
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client(client_config=botocore.config.Config(max_pool_connections=50))
+ has_extensions = _has_bucket_logging_extension()
+
+ # minimal configuration
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['LoggingType'] = 'Journal'
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
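+ # many concurrent uploads race on initializing the pending log object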
+ num_keys = 50
+ t = []
+ for i in range(num_keys):
+ name = 'myobject'+str(i)
+ thr = threading.Thread(target=client.put_object,
+ kwargs={'Bucket': src_bucket_name, 'Key': name, 'Body': randcontent()})
+ thr.start()
+ t.append(thr)
+ _do_wait_completion(t)
+
+ response = client.list_objects_v2(Bucket=src_bucket_name)
+ src_keys = _get_keys(response)
+
+ time.sleep(5)
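+ # after the roll time, concurrent requests race on committing the pending log object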
+ t = []
+ for i in range(num_keys):
+ thr = threading.Thread(target=client.put_object,
+ kwargs={'Bucket': src_bucket_name, 'Key': 'dummy', 'Body': 'dummy'})
+ thr.start()
+ t.append(thr)
+ _do_wait_completion(t)
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ record_type = 'Journal' if has_extensions else 'Standard'
+
+ for key in keys:
+ logger.info('logging object: %s', key)
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, record_type, num_keys)
+
+
def _bucket_logging_delete_objects(versioned):
src_bucket_name = get_new_bucket()
if versioned:
From 270d9ca7913b1cb424c9e235ebbe642910ffbed8 Mon Sep 17 00:00:00 2001
From: Yuval Lifshitz
Date: Thu, 31 Oct 2024 16:00:27 +0000
Subject: [PATCH 4/6] rgw/logging: add copy tests
also verify that the tests pass with and without the extensions
Signed-off-by: Yuval Lifshitz
---
s3tests_boto3/functional/test_s3.py | 210 +++++++++++++++++++---------
1 file changed, 147 insertions(+), 63 deletions(-)
diff --git a/s3tests_boto3/functional/test_s3.py b/s3tests_boto3/functional/test_s3.py
index a0e91872..d7ffe165 100644
--- a/s3tests_boto3/functional/test_s3.py
+++ b/s3tests_boto3/functional/test_s3.py
@@ -13990,6 +13990,7 @@ def _parse_log_record(record, record_type):
else:
assert False, 'unknown log record type'
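+# roll time (in seconds) assumed throughout the tests; when the extension model is
+# not available, the server must be started with a matching rgw_bucket_log_object_roll_time (see README)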
+expected_object_roll_time = 5
@pytest.mark.bucket_logging
def test_put_bucket_logging():
@@ -14007,7 +14008,7 @@ def test_put_bucket_logging():
}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
@@ -14016,9 +14017,10 @@ def test_put_bucket_logging():
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
# the default target object key format is returned
logging_enabled['TargetObjectKeyFormat'] = {'SimplePrefix': {}}
- if has_extensions:
- logging_enabled['LoggingType'] = 'Standard'
- logging_enabled['RecordsBatchSize'] = 0
+ if not has_extensions:
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
assert response['LoggingEnabled'] == logging_enabled
# with simple target object prefix
@@ -14030,16 +14032,17 @@ def test_put_bucket_logging():
}
}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
response = client.get_bucket_logging(Bucket=src_bucket_name)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- if has_extensions:
- logging_enabled['LoggingType'] = 'Standard'
- logging_enabled['RecordsBatchSize'] = 0
+ if not has_extensions:
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
assert response['LoggingEnabled'] == logging_enabled
# with partitioned target object prefix
@@ -14053,16 +14056,17 @@ def test_put_bucket_logging():
}
}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
response = client.get_bucket_logging(Bucket=src_bucket_name)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- if has_extensions:
- logging_enabled['LoggingType'] = 'Standard'
- logging_enabled['RecordsBatchSize'] = 0
+ if not has_extensions:
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
assert response['LoggingEnabled'] == logging_enabled
# with target grant (not implemented in RGW)
@@ -14074,16 +14078,17 @@ def test_put_bucket_logging():
'TargetGrants': [{'Grantee': {'DisplayName': main_display_name, 'ID': main_user_id,'Type': 'CanonicalUser'},'Permission': 'FULL_CONTROL'}]
}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
response = client.get_bucket_logging(Bucket=src_bucket_name)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- if has_extensions:
- logging_enabled['LoggingType'] = 'Standard'
- logging_enabled['RecordsBatchSize'] = 0
+ if not has_extensions:
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
# target grants are not implemented
logging_enabled.pop('TargetGrants')
# default value for key prefix is returned
@@ -14204,7 +14209,7 @@ def test_put_bucket_logging_extensions():
logging_enabled = {'TargetBucket': log_bucket_name,
'TargetPrefix': 'log/',
'LoggingType': 'Standard',
- 'ObjectRollTime': 5,
+ 'ObjectRollTime': expected_object_roll_time,
'RecordsBatchSize': 0
}
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
@@ -14254,7 +14259,7 @@ def _bucket_logging_put_objects(versioned):
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
@@ -14276,7 +14281,7 @@ def _bucket_logging_put_objects(versioned):
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
- time.sleep(5)
+ time.sleep(expected_object_roll_time)
client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
response = client.list_objects_v2(Bucket=log_bucket_name)
@@ -14289,7 +14294,7 @@ def _bucket_logging_put_objects(versioned):
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, record_type, expected_count)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_keys, record_type, expected_count)
@pytest.mark.bucket_logging
@@ -14313,7 +14318,7 @@ def test_bucket_logging_put_concurrency():
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
@@ -14333,7 +14338,7 @@ def test_bucket_logging_put_concurrency():
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
- time.sleep(5)
+ time.sleep(expected_object_roll_time)
t = []
for i in range(num_keys):
thr = threading.Thread(target = client.put_object,
@@ -14353,7 +14358,7 @@ def test_bucket_logging_put_concurrency():
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, record_type, num_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_keys, record_type, num_keys)
def _bucket_logging_delete_objects(versioned):
@@ -14375,7 +14380,7 @@ def _bucket_logging_delete_objects(versioned):
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
@@ -14390,7 +14395,7 @@ def _bucket_logging_delete_objects(versioned):
client.delete_object(Bucket=src_bucket_name, Key=key, VersionId=response['VersionId'])
client.delete_object(Bucket=src_bucket_name, Key=key)
- time.sleep(5)
+ time.sleep(expected_object_roll_time)
client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
response = client.list_objects_v2(Bucket=log_bucket_name)
@@ -14407,7 +14412,7 @@ def _bucket_logging_delete_objects(versioned):
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
record_type = 'Standard' if not has_extensions else 'Journal'
- assert _verify_records(body, src_bucket_name, 'REST.DELETE.delete_obj', src_keys, record_type, expected_count)
+ assert _verify_records(body, src_bucket_name, 'REST.DELETE.OBJECT', src_keys, record_type, expected_count)
@pytest.mark.bucket_logging
@@ -14440,7 +14445,7 @@ def _bucket_logging_get_objects(versioned):
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Standard'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
@@ -14455,7 +14460,7 @@ def _bucket_logging_get_objects(versioned):
client.get_object(Bucket=src_bucket_name, Key=key, VersionId=response['VersionId'])
client.get_object(Bucket=src_bucket_name, Key=key)
- time.sleep(5)
+ time.sleep(expected_object_roll_time)
client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
response = client.list_objects_v2(Bucket=log_bucket_name)
@@ -14471,7 +14476,7 @@ def _bucket_logging_get_objects(versioned):
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.GET.get_obj', src_keys, 'Standard', expected_count)
+ assert _verify_records(body, src_bucket_name, 'REST.GET.OBJECT', src_keys, 'Standard', expected_count)
@pytest.mark.bucket_logging
@@ -14485,8 +14490,12 @@ def test_bucket_logging_get_objects_versioned():
@pytest.mark.bucket_logging
-def _bucket_logging_copy_objects(versioned):
+def _bucket_logging_copy_objects(versioned, another_bucket):
src_bucket_name = get_new_bucket()
+ if another_bucket:
+ dst_bucket_name = get_new_bucket()
+ else:
+ dst_bucket_name = src_bucket_name
if versioned:
check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
log_bucket_name = get_new_bucket_name()
@@ -14504,41 +14513,61 @@ def _bucket_logging_copy_objects(versioned):
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
-
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ if another_bucket:
+ response = client.put_bucket_logging(Bucket=dst_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
+ dst_keys = []
for key in src_keys:
- client.copy_object(Bucket=src_bucket_name, Key='copy_of_'+key, CopySource={'Bucket': src_bucket_name, 'Key': key})
+ dst_keys.append('copy_of_'+key)
+ if another_bucket:
+ client.copy_object(Bucket=dst_bucket_name, Key='copy_of_'+key, CopySource={'Bucket': src_bucket_name, 'Key': key})
+ else:
+ client.copy_object(Bucket=src_bucket_name, Key='copy_of_'+key, CopySource={'Bucket': src_bucket_name, 'Key': key})
- time.sleep(5)
+ time.sleep(expected_object_roll_time)
client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 1
- key= keys[0]
+ key = keys[0]
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
record_type = 'Standard' if not has_extensions else 'Journal'
- assert _verify_records(body, src_bucket_name, 'REST.PUT.copy_obj', src_keys, record_type, num_keys)
+ assert _verify_records(body, dst_bucket_name, 'REST.PUT.OBJECT', dst_keys, record_type, num_keys)
@pytest.mark.bucket_logging
def test_bucket_logging_copy_objects():
- _bucket_logging_copy_objects(False)
+ _bucket_logging_copy_objects(False, False)
@pytest.mark.bucket_logging
def test_bucket_logging_copy_objects_versioned():
- _bucket_logging_copy_objects(True)
+ _bucket_logging_copy_objects(True, False)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_copy_objects_bucket():
+ _bucket_logging_copy_objects(False, True)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_copy_objects_bucket_versioned():
+ _bucket_logging_copy_objects(True, True)
@pytest.mark.bucket_logging
@@ -14558,7 +14587,7 @@ def _bucket_logging_head_objects(versioned):
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Standard'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
@@ -14574,7 +14603,7 @@ def _bucket_logging_head_objects(versioned):
else:
client.head_object(Bucket=src_bucket_name, Key=key)
- time.sleep(5)
+ time.sleep(expected_object_roll_time)
client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
response = client.list_objects_v2(Bucket=log_bucket_name)
@@ -14590,7 +14619,7 @@ def _bucket_logging_head_objects(versioned):
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys, 'Standard', expected_count)
+ assert _verify_records(body, src_bucket_name, 'REST.HEAD.OBJECT', src_keys, 'Standard', expected_count)
@pytest.mark.bucket_logging
@@ -14616,7 +14645,7 @@ def _bucket_logging_mpu(versioned):
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
@@ -14631,7 +14660,7 @@ def _bucket_logging_mpu(versioned):
(upload_id, data, parts) = _multipart_upload(bucket_name=src_bucket_name, key=src_key, size=objlen)
client.complete_multipart_upload(Bucket=src_bucket_name, Key=src_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
- time.sleep(5)
+ time.sleep(expected_object_roll_time)
client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
response = client.list_objects_v2(Bucket=log_bucket_name)
@@ -14639,16 +14668,16 @@ def _bucket_logging_mpu(versioned):
assert len(keys) == 1
if versioned:
- expected_count = 2
+ expected_count = 4 if not has_extensions else 2
else:
- expected_count = 1
+ expected_count = 2 if not has_extensions else 1
key = keys[0]
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
record_type = 'Standard' if not has_extensions else 'Journal'
- assert _verify_records(body, src_bucket_name, 'REST.POST.complete_multipart', [src_key], record_type, expected_count)
+ assert _verify_records(body, src_bucket_name, 'REST.POST.UPLOAD', [src_key, src_key], record_type, expected_count)
@pytest.mark.bucket_logging
@@ -14661,6 +14690,61 @@ def test_bucket_logging_mpu_versioned():
_bucket_logging_mpu(True)
+@pytest.mark.bucket_logging
+def _bucket_logging_mpu_copy(versioned):
+ src_bucket_name = get_new_bucket()
+ if versioned:
+ check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ has_extensions = _has_bucket_logging_extension()
+
+ src_key = "myobject"
+ objlen = 30 * 1024 * 1024
+ (upload_id, data, parts) = _multipart_upload(bucket_name=src_bucket_name, key=src_key, size=objlen)
+ client.complete_multipart_upload(Bucket=src_bucket_name, Key=src_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+ if versioned:
+ (upload_id, data, parts) = _multipart_upload(bucket_name=src_bucket_name, key=src_key, size=objlen)
+ client.complete_multipart_upload(Bucket=src_bucket_name, Key=src_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+ # minimal configuration
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
+ logging_enabled['LoggingType'] = 'Journal'
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+ client.copy_object(Bucket=src_bucket_name, Key='copy_of_'+src_key, CopySource={'Bucket': src_bucket_name, 'Key': src_key})
+
+ time.sleep(expected_object_roll_time)
+ client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
+
+ response = client.list_objects_v2(Bucket=log_bucket_name)
+ keys = _get_keys(response)
+ assert len(keys) == 1
+
+ key = keys[0]
+ assert key.startswith('log/')
+ response = client.get_object(Bucket=log_bucket_name, Key=key)
+ body = _get_body(response)
+ record_type = 'Standard' if not has_extensions else 'Journal'
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', ['copy_of_'+src_key], record_type, 1)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_mpu_copy():
+ _bucket_logging_mpu_copy(False)
+
+
+@pytest.mark.bucket_logging
+def test_bucket_logging_mpu_copy_versioned():
+ _bucket_logging_mpu_copy(True)
+
+
def _bucket_logging_multi_delete(versioned):
src_bucket_name = get_new_bucket()
if versioned:
@@ -14680,7 +14764,7 @@ def _bucket_logging_multi_delete(versioned):
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
@@ -14701,7 +14785,7 @@ def _bucket_logging_multi_delete(versioned):
objs_dict = _make_objs_dict(key_names=src_keys)
client.delete_objects(Bucket=src_bucket_name, Delete=objs_dict)
- time.sleep(5)
+ time.sleep(expected_object_roll_time)
client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
response = client.list_objects_v2(Bucket=log_bucket_name)
@@ -14718,7 +14802,7 @@ def _bucket_logging_multi_delete(versioned):
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
record_type = 'Standard' if not has_extensions else 'Journal'
- assert _verify_records(body, src_bucket_name, "REST.POST.multi_object_delete", src_keys, record_type, expected_count)
+ assert _verify_records(body, src_bucket_name, "REST.POST.DELETE_MULTI_OBJECT", src_keys, record_type, expected_count)
@pytest.mark.bucket_logging
@@ -14740,7 +14824,7 @@ def _bucket_logging_type(logging_type):
logging_enabled = {
'TargetBucket': log_bucket_name,
'TargetPrefix': 'log/',
- 'ObjectRollTime': 5,
+ 'ObjectRollTime': expected_object_roll_time,
'LoggingType': logging_type
}
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
@@ -14755,7 +14839,7 @@ def _bucket_logging_type(logging_type):
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
- time.sleep(5)
+ time.sleep(expected_object_roll_time)
client.put_object(Bucket=src_bucket_name, Key='dummy', Body='dummy')
client.head_object(Bucket=src_bucket_name, Key='dummy')
@@ -14768,11 +14852,11 @@ def _bucket_logging_type(logging_type):
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
if logging_type == 'Journal':
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Journal', num_keys)
- assert _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys, 'Journal', num_keys) == False
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_keys, 'Journal', num_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.HEAD.OBJECT', src_keys, 'Journal', num_keys) == False
elif logging_type == 'Standard':
- assert _verify_records(body, src_bucket_name, 'REST.HEAD.get_obj', src_keys, 'Standard', num_keys)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Standard', num_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.HEAD.OBJECT', src_keys, 'Standard', num_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_keys, 'Standard', num_keys)
else:
assert False, 'invalid logging type:'+logging_type
@@ -14837,7 +14921,7 @@ def test_bucket_logging_roll_time():
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Standard', num_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_keys, 'Standard', num_keys)
client.delete_object(Bucket=log_bucket_name, Key=key)
num_keys = 25
@@ -14861,7 +14945,7 @@ def test_bucket_logging_roll_time():
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body += _get_body(response)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Standard', num_keys+1)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_keys, 'Standard', num_keys+1)
@pytest.mark.bucket_logging
@@ -14879,7 +14963,7 @@ def test_bucket_logging_multiple_prefixes():
src_bucket = get_new_bucket_resource(name=src_bucket_name)
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': src_bucket_name+'/'}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
@@ -14892,7 +14976,7 @@ def test_bucket_logging_multiple_prefixes():
name = 'myobject'+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
- time.sleep(5)
+ time.sleep(expected_object_roll_time)
for src_bucket_name in buckets:
client.head_object(Bucket=src_bucket_name, Key='myobject0')
@@ -14909,7 +14993,7 @@ def test_bucket_logging_multiple_prefixes():
found = True
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
- assert _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Standard', num_keys)
+ assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_keys, 'Standard', num_keys)
assert found
@@ -14929,7 +15013,7 @@ def test_bucket_logging_single_prefix():
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if has_extensions:
- logging_enabled['ObjectRollTime'] = 5
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
@@ -14944,7 +15028,7 @@ def test_bucket_logging_single_prefix():
name = 'myobject'+str(bucket_ind)+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
- time.sleep(5)
+ time.sleep(expected_object_roll_time)
client.put_object(Bucket=buckets[0], Key='dummy', Body='dummy')
response = client.list_objects_v2(Bucket=log_bucket_name)
@@ -14958,5 +15042,5 @@ def test_bucket_logging_single_prefix():
for src_bucket_name in buckets:
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
- found = _verify_records(body, src_bucket_name, 'REST.PUT.put_obj', src_keys, 'Standard', num_keys)
+ found = _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_keys, 'Standard', num_keys)
assert found
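
The patch above standardizes the operation names the tests expect in the log
records (e.g. 'REST.PUT.put_obj' becomes 'REST.PUT.OBJECT'). For orientation, a
hedged sketch of the kind of matching _verify_records performs; the real helper
lives earlier in test_s3.py, and the only assumption made here is that the bucket
name, the operation, and the object key each appear as whitespace-separated
fields of a record::

    def _sketch_verify_records(body, bucket_name, operation, keys, expected_count):
        # one access-log record per line in the downloaded log object
        matches = 0
        matched_keys = set()
        for record in body.splitlines():
            fields = record.split()
            if bucket_name in fields and operation in fields:
                matches += 1
                matched_keys.update(k for k in keys if k in fields)
        return matches == expected_count and matched_keys == set(keys)
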
From b78bdf678f89fa3d9403025cb05d6926dcdcd15d Mon Sep 17 00:00:00 2001
From: Yuval Lifshitz
Date: Thu, 21 Nov 2024 14:06:43 +0000
Subject: [PATCH 5/6] rgw/logging: support case of no extensions
as well as older boto versions with a missing field
Signed-off-by: Yuval Lifshitz
---
s3tests_boto3/functional/test_s3.py | 156 +++++++++++++++-------------
1 file changed, 86 insertions(+), 70 deletions(-)
diff --git a/s3tests_boto3/functional/test_s3.py b/s3tests_boto3/functional/test_s3.py
index d7ffe165..039ea113 100644
--- a/s3tests_boto3/functional/test_s3.py
+++ b/s3tests_boto3/functional/test_s3.py
@@ -13930,6 +13930,21 @@ def _has_bucket_logging_extension():
return False
return True
+def _has_target_object_key_format():
+ src_bucket_name = get_new_bucket_name()
+ src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ log_bucket_name = get_new_bucket_name()
+ log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ client = get_client()
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/', 'TargetObjectKeyFormat': {'SimplePrefix': {}}}
+ try:
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ except ParamValidationError as e:
+ return False
+ return True
+
import shlex
@@ -14000,6 +14015,7 @@ def test_put_bucket_logging():
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
has_extensions = _has_bucket_logging_extension()
+ has_key_format = _has_target_object_key_format()
# minimal configuration
logging_enabled = {
@@ -14015,59 +14031,58 @@ def test_put_bucket_logging():
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
response = client.get_bucket_logging(Bucket=src_bucket_name)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- # default value for key prefix is returned
- logging_enabled['TargetObjectKeyFormat'] = {'SimplePrefix': {}}
- if not has_extensions:
- logging_enabled['ObjectRollTime'] = expected_object_roll_time
- logging_enabled['LoggingType'] = 'Standard'
- logging_enabled['RecordsBatchSize'] = 0
- assert response['LoggingEnabled'] == logging_enabled
-
- # with simple target object prefix
- logging_enabled = {
- 'TargetBucket': log_bucket_name,
- 'TargetPrefix': 'log/',
- 'TargetObjectKeyFormat': {
- 'SimplePrefix': {}
- }
- }
if has_extensions:
- logging_enabled['ObjectRollTime'] = expected_object_roll_time
- response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
- 'LoggingEnabled': logging_enabled,
- })
- assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- response = client.get_bucket_logging(Bucket=src_bucket_name)
- assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- if not has_extensions:
- logging_enabled['ObjectRollTime'] = expected_object_roll_time
- logging_enabled['LoggingType'] = 'Standard'
- logging_enabled['RecordsBatchSize'] = 0
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
+ if has_key_format:
+ # default value for key prefix is returned
+ logging_enabled['TargetObjectKeyFormat'] = {'SimplePrefix': {}}
assert response['LoggingEnabled'] == logging_enabled
-
- # with partitioned target object prefix
- logging_enabled = {
- 'TargetBucket': log_bucket_name,
- 'TargetPrefix': 'log/',
- 'TargetObjectKeyFormat': {
- 'PartitionedPrefix': {
- 'PartitionDateSource': 'DeliveryTime'
+
+ if has_key_format:
+ # with simple target object prefix
+ logging_enabled = {
+ 'TargetBucket': log_bucket_name,
+ 'TargetPrefix': 'log/',
+ 'TargetObjectKeyFormat': {
+ 'SimplePrefix': {}
}
}
- }
- if has_extensions:
- logging_enabled['ObjectRollTime'] = expected_object_roll_time
- response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
- 'LoggingEnabled': logging_enabled,
- })
- assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- response = client.get_bucket_logging(Bucket=src_bucket_name)
- assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- if not has_extensions:
- logging_enabled['ObjectRollTime'] = expected_object_roll_time
- logging_enabled['LoggingType'] = 'Standard'
- logging_enabled['RecordsBatchSize'] = 0
- assert response['LoggingEnabled'] == logging_enabled
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ response = client.get_bucket_logging(Bucket=src_bucket_name)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ if has_extensions:
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
+ assert response['LoggingEnabled'] == logging_enabled
+
+ # with partitioned target object prefix
+ logging_enabled = {
+ 'TargetBucket': log_bucket_name,
+ 'TargetPrefix': 'log/',
+ 'TargetObjectKeyFormat': {
+ 'PartitionedPrefix': {
+ 'PartitionDateSource': 'DeliveryTime'
+ }
+ }
+ }
+ if has_extensions:
+ logging_enabled['ObjectRollTime'] = expected_object_roll_time
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ response = client.get_bucket_logging(Bucket=src_bucket_name)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ if has_extensions:
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
+ assert response['LoggingEnabled'] == logging_enabled
# with target grant (not implemented in RGW)
main_display_name = get_main_display_name()
@@ -14085,14 +14100,14 @@ def test_put_bucket_logging():
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
response = client.get_bucket_logging(Bucket=src_bucket_name)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- if not has_extensions:
- logging_enabled['ObjectRollTime'] = expected_object_roll_time
- logging_enabled['LoggingType'] = 'Standard'
- logging_enabled['RecordsBatchSize'] = 0
+ if has_extensions:
+ logging_enabled['LoggingType'] = 'Standard'
+ logging_enabled['RecordsBatchSize'] = 0
# target grants are not implemented
logging_enabled.pop('TargetGrants')
- # default value for key prefix is returned
- logging_enabled['TargetObjectKeyFormat'] = {'SimplePrefix': {}}
+ if has_key_format:
+ # default value for key prefix is returned
+ logging_enabled['TargetObjectKeyFormat'] = {'SimplePrefix': {}}
assert response['LoggingEnabled'] == logging_enabled
@@ -14137,23 +14152,24 @@ def test_put_bucket_logging_errors():
except ClientError as e:
assert e.response['Error']['Code'] == 'InvalidArgument'
- # invalid partition prefix
- logging_enabled = {
- 'TargetBucket': log_bucket_name1,
- 'TargetPrefix': 'log/',
- 'TargetObjectKeyFormat': {
- 'PartitionedPrefix': {
- 'PartitionDateSource': 'kaboom'
+ if _has_target_object_key_format():
+ # invalid partition prefix
+ logging_enabled = {
+ 'TargetBucket': log_bucket_name1,
+ 'TargetPrefix': 'log/',
+ 'TargetObjectKeyFormat': {
+ 'PartitionedPrefix': {
+ 'PartitionDateSource': 'kaboom'
+ }
}
}
- }
- try:
- response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
- 'LoggingEnabled': logging_enabled,
- })
- assert False, 'expected failure'
- except ClientError as e:
- assert e.response['Error']['Code'] == 'MalformedXML'
+ try:
+ response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ 'LoggingEnabled': logging_enabled,
+ })
+ assert False, 'expected failure'
+ except ClientError as e:
+ assert e.response['Error']['Code'] == 'MalformedXML'
# TODO: log bucket is encrypted
#_put_bucket_encryption_s3(client, log_bucket_name)
From 877ec17a66cbed737a4281d3e9b375563a88e9be Mon Sep 17 00:00:00 2001
From: Yuval Lifshitz
Date: Thu, 21 Nov 2024 15:46:09 +0000
Subject: [PATCH 6/6] rgw/logging: add boto3 extension file
Signed-off-by: Yuval Lifshitz
---
README.rst | 2 +-
s3tests_boto3/service-2.sdk-extras.json | 347 ++++++++++++++++++++++++
2 files changed, 348 insertions(+), 1 deletion(-)
create mode 100644 s3tests_boto3/service-2.sdk-extras.json
diff --git a/README.rst b/README.rst
index 283b7e87..e9f493df 100644
--- a/README.rst
+++ b/README.rst
@@ -104,7 +104,7 @@ You can filter tests based on their attributes::
Bucket logging tests
========================
-Ceph has extensions for the bucket logging S3 API. For the tests to cover these extensions, the following file: `examples/rgw/boto3/service-2.sdk-extras.json` from the Ceph repo,
+Ceph has extensions for the bucket logging S3 API. For the tests to cover these extensions, the following file: `s3tests_boto3/service-2.sdk-extras.json`,
should be copied to the: `~/.aws/models/s3/2006-03-01/` directory on the machine where the tests are run.
If the file is not present, the tests will still run, but the extension tests will be skipped. In this case, the bucket logging object roll time must be decreased manually from its default of
300 seconds to 5 seconds::
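
The copy step described in the README can also be scripted; a minimal sketch,
assuming the tests are run from the repository root (botocore picks up extras
models from ~/.aws/models)::

    import os
    import shutil

    # destination where boto3/botocore look for service model extras
    models_dir = os.path.expanduser('~/.aws/models/s3/2006-03-01/')
    os.makedirs(models_dir, exist_ok=True)
    shutil.copy('s3tests_boto3/service-2.sdk-extras.json', models_dir)
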
diff --git a/s3tests_boto3/service-2.sdk-extras.json b/s3tests_boto3/service-2.sdk-extras.json
new file mode 100644
index 00000000..5c22ee9f
--- /dev/null
+++ b/s3tests_boto3/service-2.sdk-extras.json
@@ -0,0 +1,347 @@
+{
+"version": 1.0,
+"merge": {
+ "operations":{
+ "DeleteBucketNotificationConfiguration":{
+ "name":"DeleteBucketNotificationConfiguration",
+ "http":{
+ "method":"DELETE",
+ "requestUri":"/{Bucket}?notification",
+ "responseCode":204
+ },
+ "input":{"shape":"DeleteBucketNotificationConfigurationRequest"},
+ "documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/bucketops/#delete-notification",
+ "documentation":"Deletes the notification configuration from the bucket.
"
+ },
+ "GetUsageStats":{
+ "name":"GetUsageStats",
+ "http":{
+ "method":"GET",
+ "requestUri":"/?usage",
+ "responseCode":200
+ },
+ "output": {"shape": "GetUsageStatsOutput"},
+ "documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/serviceops#get-usage-stats",
+ "documentation":"Get usage stats for the user
"
+ }
+ },
+ "shapes": {
+ "ListObjectsRequest": {
+ "members": {
+ "AllowUnordered": {
+ "shape":"AllowUnordered",
+ "documentation":"Allow the listing results to be returned in unsorted order. This may be faster when listing very large buckets.
",
+ "location":"querystring",
+ "locationName":"allow-unordered"
+ }
+ }
+ },
+ "ListObjectsV2Request": {
+ "members": {
+ "AllowUnordered": {
+ "shape":"AllowUnordered",
+ "documentation":"Allow the listing results to be returned in unsorted order. This may be faster when listing very large buckets.
",
+ "location":"querystring",
+ "locationName":"allow-unordered"
+ }
+ }
+ },
+ "ReplicationRule":{
+ "members":{
+ "Source": {
+ "shape":"S3RepSource",
+ "documentation":"A container for information about the replication source.
",
+ "locationName":"Source"
+ }
+ }
+ },
+ "S3RepSource": {
+ "type": "structure",
+ "members": {
+ "Zones": {
+ "shape":"ZoneList",
+ "documentation":"
Array of replication source zone names.
",
+ "locationName":"Zone"
+ }
+ }
+ },
+ "Destination": {
+ "members": {
+ "Zones": {
+ "shape":"ZoneList",
+ "documentation":"Array of replication destination zone names.
",
+ "locationName":"Zone"
+ }
+ }
+ },
+ "ZoneList": {
+ "type":"list",
+ "member":{"shape":"Zone"},
+ "flattened":true
+ },
+ "Zone":{"type":"string"},
+ "AllowUnordered":{"type":"boolean"},
+ "PutObjectRequest": {
+ "members": {
+ "AppendPosition": {
+ "shape":"AppendPosition",
+ "documentation": "Position to allow appending
",
+ "location": "querystring",
+ "locationName": "position"
+ },
+ "Append": {
+ "shape":"Append",
+ "documentation":"Append Object
",
+ "location": "querystring",
+ "locationName": "append"
+ }
+ }
+ },
+ "Append": {"type":"boolean"},
+ "AppendPosition":{"type":"integer"},
+ "PutObjectOutput": {
+ "members": {
+ "AppendPosition": {
+ "shape":"AppendPosition",
+ "documentation": "Position to allow appending
",
+ "location": "header",
+ "locationName": "x-rgw-next-append-position",
+ "documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/objectops/#append-object"
+ }
+ }
+ },
+ "GetBucketNotificationConfigurationRequest":{
+ "type":"structure",
+ "required":["Bucket"],
+ "members":{
+ "Bucket":{
+ "shape":"BucketName",
+ "documentation":"Name of the bucket to get the notifications configuration for.
",
+ "location":"uri",
+ "locationName":"Bucket"
+ },
+ "Notification":{
+ "shape":"NotificationId",
+ "documentation":"Id of the specific notification on the bucket for which the configuration should be retrieved.
",
+ "location":"querystring",
+ "locationName":"notification-id"
+ }
+ }
+ },
+ "DeleteBucketNotificationConfigurationRequest":{
+ "type":"structure",
+ "required":["Bucket"],
+ "members":{
+ "Bucket":{
+ "shape":"BucketName",
+ "documentation":"Name of the bucket to delete the notifications configuration from.
",
+ "location":"uri",
+ "locationName":"Bucket"
+ },
+ "Notification":{
+ "shape":"NotificationId",
+ "documentation":"Id of the specific notification on the bucket to be deleted.
",
+ "location":"querystring",
+ "locationName":"notification-id"
+ }
+ }
+ },
+ "FilterRule":{
+ "type":"structure",
+ "members":{
+ "Name":{
+ "shape":"FilterRuleName",
+ "documentation":"The object key name prefix, suffix or regex identifying one or more objects to which the filtering rule applies. The maximum length is 1,024 characters. Overlapping prefixes and suffixes are supported.
"
+ },
+ "Value":{
+ "shape":"FilterRuleValue",
+ "documentation":"The value that the filter searches for in object key names.
"
+ }
+ },
+ "documentation":"Specifies the Amazon S3 object key name to filter on and whether to filter on the suffix, prefix or regex of the key name.
"
+ },
+ "FilterRuleName":{
+ "type":"string",
+ "enum":[
+ "prefix",
+ "suffix",
+ "regex"
+ ]
+ },
+ "NotificationConfigurationFilter":{
+ "type":"structure",
+ "members":{
+ "Key":{
+ "shape":"S3KeyFilter",
+ "documentation":"",
+ "locationName":"S3Key"
+ },
+ "Metadata":{
+ "shape":"S3MetadataFilter",
+ "documentation":"",
+ "locationName":"S3Metadata"
+ },
+ "Tags":{
+ "shape":"S3TagsFilter",
+ "documentation":"",
+ "locationName":"S3Tags"
+ }
+
+ },
+ "documentation":"Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.
"
+ },
+ "S3KeyFilter":{
+ "type":"structure",
+ "members":{
+ "FilterRules":{
+ "shape":"FilterRuleList",
+ "documentation":"",
+ "locationName":"FilterRule"
+ }
+ },
+ "documentation":"A container for object key name prefix, suffix and regex filtering rules.
"
+ },
+ "S3MetadataFilter":{
+ "type":"structure",
+ "members":{
+ "FilterRules":{
+ "shape":"FilterRuleList",
+ "documentation":"",
+ "locationName":"FilterRule"
+ }
+ },
+ "documentation":"A container for metadata filtering rules.
"
+ },
+ "S3TagsFilter":{
+ "type":"structure",
+ "members":{
+ "FilterRules":{
+ "shape":"FilterRuleList",
+ "documentation":"",
+ "locationName":"FilterRule"
+ }
+ },
+ "documentation":"A container for object tags filtering rules.
"
+ },
+ "GetUsageStatsOutput": {
+ "type": "structure",
+ "members": {
+ "Summary": {
+ "shape":"UsageStatsSummary",
+ "documentation": ""
+ }
+ }
+ },
+ "UsageStatsSummary": {
+ "type": "structure",
+ "members": {
+ "QuotaMaxBytes":{"shape":"QuotaMaxBytes"},
+ "QuotaMaxBuckets":{"shape": "QuotaMaxBuckets"},
+ "QuotaMaxObjCount":{"shape":"QuotaMaxObjCount"},
+ "QuotaMaxBytesPerBucket":{"shape":"QuotaMaxBytesPerBucket"},
+ "QuotaMaxObjCountPerBucket":{"shape":"QuotaMaxObjCountPerBucket"},
+ "TotalBytes":{"shape":"TotalBytes"},
+ "TotalBytesRounded":{"shape":"TotalBytesRounded"},
+ "TotalEntries":{"shape":"TotalEntries"}
+ }
+ },
+ "QuotaMaxBytes":{"type":"integer"},
+ "QuotaMaxBuckets":{"type": "integer"},
+ "QuotaMaxObjCount":{"type":"integer"},
+ "QuotaMaxBytesPerBucket":{"type":"integer"},
+ "QuotaMaxObjCountPerBucket":{"type":"integer"},
+ "TotalBytesRounded":{"type":"integer"},
+ "TotalBytes":{"type":"integer"},
+ "TotalEntries":{"type":"integer"},
+ "LoggingEnabled":{
+ "type":"structure",
+ "required":[
+ "TargetBucket",
+ "TargetPrefix"
+ ],
+ "members":{
+ "TargetBucket":{
+ "shape":"TargetBucket",
+ "documentation":"Specifies the bucket where you want to store server access logs. You can have your logs delivered to any bucket that you own. You can also configure multiple buckets to deliver their logs to the same target bucket. In this case, you should choose a different TargetPrefix
for each source bucket so that the delivered log files can be distinguished by key.
"
+ },
+ "TargetGrants":{
+ "shape":"TargetGrants",
+ "documentation":"Container for granting information.
Should be used when the write permissions to the tagert bucket should eb different than the permissions of the user performing the operation thta needs to be logged. This is usually used in cased of batched logging. see: RecordBatchSize
.
"
+ },
+ "TargetPrefix":{
+ "shape":"TargetPrefix",
+ "documentation":"A prefix for all log object keys. If you store log files from multiple buckets in a single bucket, you can use a prefix to distinguish which log files came from which bucket.
"
+ },
+ "TargetObjectKeyFormat":{
+ "shape":"TargetObjectKeyFormat",
+ "documentation":"key format for log objects.
"
+ },
+ "ObjectRollTime":{
+ "shape":"ObjectRollTime",
+ "documentation":"time in seconds to move the log object to the target bucket and start another log object.
"
+ },
+ "LoggingType":{
+ "shape":"LoggingType",
+ "documentation":"use Standard log type to log all bucket operations i nthe standard format. use Journal log type to log only creations and deletion of objects in more compact format.
"
+ },
+ "RecordsBatchSize":{
+ "shape":"RecordsBatchSize",
+ "documentation":"indicates how many records to batch in memory before writing to the object. if set to zero, records are written syncronously to the object. if ObjectRollTime
e is reached, the batch of records will be written to the object regardless of the number of records.
"
+ }
+ },
+ "documentation":"Describes where logs are stored the prefix assigned to all log object keys for a bucket, and their format. also, the level the delivery guarantee of the records.
"
+ },
+ "TargetObjectKeyFormat":{
+ "type":"structure",
+ "members":{
+ "SimplePrefix":{
+ "shape":"SimplePrefix",
+ "documentation":"To use the simple format for S3 keys for log objects. To specify SimplePrefix format, set SimplePrefix to {}.
",
+ "locationName":"SimplePrefix"
+ },
+ "PartitionedPrefix":{
+ "shape":"PartitionedPrefix",
+ "documentation":"Partitioned S3 key for log objects.
",
+ "locationName":"PartitionedPrefix"
+ }
+ },
+ "documentation":"Key format for log objects. Only one format, PartitionedPrefix or SimplePrefix, is allowed.
"
+ },
+ "SimplePrefix":{
+ "type":"structure",
+ "members":{
+ },
+ "documentation":"To use simple format for S3 keys for log objects, set SimplePrefix to an empty object.
[DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
",
+ "locationName":"SimplePrefix"
+ },
+ "PartitionDateSource":{
+ "type":"string",
+ "enum":[
+ "EventTime",
+ "DeliveryTime"
+ ]
+ },
+ "PartitionedPrefix":{
+ "type":"structure",
+ "members":{
+ "PartitionDateSource":{
+ "shape":"PartitionDateSource",
+ "documentation":"Specifies the partition date source for the partitioned prefix. PartitionDateSource can be EventTime or DeliveryTime.
"
+ }
+ },
+ "documentation":"Amazon S3 keys for log objects are partitioned in the following format:
[DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
PartitionedPrefix defaults to EventTime delivery when server access logs are delivered.
",
+ "locationName":"PartitionedPrefix"
+ },
+ "ObjectRollTime":{"type":"integer"},
+ "RecordsBatchSize":{"type":"integer"},
+ "LoggingType":{
+ "type":"string",
+ "enum": [
+ "Standard",
+ "Journal"
+ ]
+ }
+ },
+ "documentation":""
+}
+}
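
Once the extras model is installed, boto3 accepts the Ceph-specific fields
defined above (ObjectRollTime, LoggingType, RecordsBatchSize). A usage sketch;
the endpoint URL and bucket names below are placeholders, not values from the
test suite::

    import boto3

    client = boto3.client('s3', endpoint_url='http://localhost:8000')
    client.put_bucket_logging(
        Bucket='src-bucket',
        BucketLoggingStatus={
            'LoggingEnabled': {
                'TargetBucket': 'log-bucket',
                'TargetPrefix': 'log/',
                'LoggingType': 'Journal',  # Journal records only object creation/deletion
                'ObjectRollTime': 5,       # seconds before the log object is committed
            }
        },
    )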