merge s3_url parameter into endpoint_url #994

Merged

5 changes: 5 additions & 0 deletions changelogs/fragments/994-s3_url.yml
@@ -0,0 +1,5 @@
+minor_changes:
+- s3_bucket - the ``s3_url`` parameter was merged into the ``endpoint_url`` parameter, ``s3_url`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994).
+- s3_bucket - ``rgw`` was added as an alias for the ``ceph`` parameter for consistency with the ``s3_object`` module (https://github.com/ansible-collections/amazon.aws/pull/994).
+- s3_object - the ``s3_url`` parameter was merged into the ``endpoint_url`` parameter, ``s3_url`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994).
+- s3_object - the ``rgw`` parameter was renamed to ``ceph`` for consistency with the ``s3_bucket`` module, ``rgw`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/994).
2 changes: 1 addition & 1 deletion plugins/doc_fragments/aws.py
@@ -25,7 +25,7 @@ class ModuleDocFragment(object):
        Ignored for modules where region is required. Must be specified for all other modules if region is not used.
        If not set then the value of the EC2_URL environment variable, if any, is used.
     type: str
-    aliases: [ ec2_url, aws_endpoint_url ]
+    aliases: [ ec2_url, aws_endpoint_url, s3_url ]
   aws_secret_key:
     description:
       - C(AWS secret key). If not set then the value of the C(AWS_SECRET_ACCESS_KEY), C(AWS_SECRET_KEY), or C(EC2_SECRET_KEY) environment variable is used.
2 changes: 1 addition & 1 deletion plugins/module_utils/modules.py
@@ -349,7 +349,7 @@ def _aws_common_argument_spec():
     """
     return dict(
         debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
-        endpoint_url=dict(aliases=['ec2_url', 'aws_endpoint_url']),
+        endpoint_url=dict(aliases=['ec2_url', 'aws_endpoint_url', 's3_url']),
         aws_access_key=dict(aliases=['ec2_access_key', 'access_key'], no_log=False),
         aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
         security_token=dict(aliases=['access_token', 'aws_security_token', 'session_token', 'aws_session_token'], no_log=True),
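With ``s3_url`` added as an alias on the common ``endpoint_url`` option, a task that still passes ``s3_url`` transparently populates ``endpoint_url``. A minimal sketch of that resolution behaviour (an illustrative emulation, not the real AnsibleModule internals; ``resolve_aliases`` is a hypothetical helper, and the spec dict is copied from the hunk above):

    # Illustrative emulation of AnsibleModule alias handling: a value
    # supplied under an alias is surfaced under the canonical option name.
    def resolve_aliases(spec, user_params):
        resolved = {}
        for name, opts in spec.items():
            for candidate in [name] + opts.get('aliases', []):
                if candidate in user_params:
                    resolved[name] = user_params[candidate]
        return resolved

    spec = {'endpoint_url': {'aliases': ['ec2_url', 'aws_endpoint_url', 's3_url']}}
    print(resolve_aliases(spec, {'s3_url': 'https://rgw.example.com'}))
    # -> {'endpoint_url': 'https://rgw.example.com'}
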
74 changes: 40 additions & 34 deletions plugins/modules/s3_bucket.py
@@ -23,7 +23,9 @@
 version_added: 1.0.0
 short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
 description:
-  - Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID.
+  - Manage S3 buckets.
+  - Compatible with AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID.
+  - When using non-AWS services, I(endpoint_url) should be specified.
 author:
   - Rob White (@wimnat)
   - Aubin Bikouo (@abikouo)
@@ -43,16 +45,13 @@
   description:
     - The JSON policy as a string. Set to the string C("null") to force the absence of a policy.
   type: json
-  s3_url:
-    description:
-      - S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and FakeS3 etc.
-      - Assumes AWS if not specified.
-      - For Walrus, use FQDN of the endpoint without scheme nor path.
-    type: str
   ceph:
     description:
-      - Enable API compatibility with Ceph. It takes into account the S3 API subset working
-        with Ceph in order to provide the same module behaviour where possible.
+      - Enable API compatibility with Ceph RGW.
+      - It takes into account the S3 API subset working with Ceph in order to provide the same module
+        behaviour where possible.
+      - Requires I(endpoint_url) if I(ceph=true).
+    aliases: ['rgw']
     type: bool
     default: false
   requester_pays:
@@ -170,6 +169,9 @@
     operations/API aren't implemented by the endpoint, module doesn't fail
     if each parameter satisfies the following condition.
     I(requester_pays) is C(False), I(policy), I(tags), and I(versioning) are C(None).
+  - In release 5.0.0 the I(s3_url) parameter was merged into the I(endpoint_url) parameter,
+    I(s3_url) remains as an alias for I(endpoint_url).
+  - For Walrus I(endpoint_url) should be set to the FQDN of the endpoint with neither scheme nor path.
 '''

 EXAMPLES = r'''
@@ -183,7 +185,7 @@
 # Create a simple S3 bucket on Ceph Rados Gateway
 - amazon.aws.s3_bucket:
     name: mys3bucket
-    s3_url: http://your-ceph-rados-gateway-server.xxx
+    endpoint_url: http://your-ceph-rados-gateway-server.xxx
     ceph: true

 # Remove an S3 bucket and any keys it contains
@@ -205,7 +207,7 @@
 # Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
 - amazon.aws.s3_bucket:
     name: mydobucket
-    s3_url: 'https://nyc3.digitaloceanspaces.com'
+    endpoint_url: 'https://nyc3.digitaloceanspaces.com'

 # Create a bucket with AES256 encryption
 - amazon.aws.s3_bucket:
@@ -1044,20 +1046,21 @@ def destroy_bucket(s3_client, module):
     module.exit_json(changed=True)


-def is_fakes3(s3_url):
-    """ Return True if s3_url has scheme fakes3:// """
-    if s3_url is not None:
-        return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+def is_fakes3(endpoint_url):
+    """ Return True if endpoint_url has scheme fakes3:// """
+    if endpoint_url is not None:
+        return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s')
     else:
         return False


-def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
-    if s3_url and ceph:  # TODO - test this
-        ceph = urlparse(s3_url)
-        params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
-    elif is_fakes3(s3_url):
-        fakes3 = urlparse(s3_url)
+def get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url):
+    if ceph:  # TODO - test this
+        ceph = urlparse(endpoint_url)
+        params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https',
+                      region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+    elif is_fakes3(endpoint_url):
+        fakes3 = urlparse(endpoint_url)
         port = fakes3.port
         if fakes3.scheme == 'fakes3s':
             protocol = "https"
@@ -1071,7 +1074,7 @@ def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
                       endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
                       use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
     else:
-        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
+        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs)
     return boto3_conn(**params)


@@ -1082,12 +1085,11 @@ def main():
         policy=dict(type='json'),
         name=dict(required=True),
         requester_pays=dict(type='bool'),
-        s3_url=dict(),
         state=dict(default='present', choices=['present', 'absent']),
         tags=dict(type='dict', aliases=['resource_tags']),
         purge_tags=dict(type='bool', default=True),
         versioning=dict(type='bool'),
-        ceph=dict(default=False, type='bool'),
+        ceph=dict(default=False, type='bool', aliases=['rgw']),
         encryption=dict(choices=['none', 'AES256', 'aws:kms']),
         encryption_key_id=dict(),
         bucket_key_enabled=dict(type='bool'),
@@ -1112,8 +1114,15 @@
         ['delete_object_ownership', 'object_ownership']
     ]

+    required_if = [
+        ['ceph', True, ['endpoint_url']],
+    ]
+
     module = AnsibleAWSModule(
-        argument_spec=argument_spec, required_by=required_by, mutually_exclusive=mutually_exclusive
+        argument_spec=argument_spec,
+        required_by=required_by,
+        required_if=required_if,
+        mutually_exclusive=mutually_exclusive
     )

     region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
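Wiring the ceph/endpoint dependency through ``required_if`` moves the check into argument validation, so ``ceph: true`` without an endpoint now fails before the module body runs, replacing the ad-hoc ``fail_json`` further down. A sketch of the rule's semantics (an emulation for illustration; AnsibleModule performs this check internally):

    def check_required_if(rules, params):
        # Each rule reads: if <param> equals <value>, the listed parameters must be set.
        for param, value, requirements in rules:
            if params.get(param) == value:
                missing = [req for req in requirements if params.get(req) is None]
                if missing:
                    raise ValueError('%s is %r but the following are missing: %s'
                                     % (param, value, ', '.join(missing)))

    rules = [['ceph', True, ['endpoint_url']]]
    check_required_if(rules, {'ceph': True, 'endpoint_url': 'http://rgw.example.com'})  # passes
    check_required_if(rules, {'ceph': False, 'endpoint_url': None})  # rule not triggered
    # check_required_if(rules, {'ceph': True, 'endpoint_url': None})  # would raise ValueError
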
@@ -1129,22 +1138,19 @@
     # actually work fine for everything except us-east-1 (US Standard)
     location = region

-    s3_url = module.params.get('s3_url')
+    endpoint_url = module.params.get('endpoint_url')
     ceph = module.params.get('ceph')

+    # Look at endpoint_url and tweak connection settings
     # allow eucarc environment variables to be used if ansible vars aren't set
-    if not s3_url and 'S3_URL' in os.environ:
-        s3_url = os.environ['S3_URL']
-
-    if ceph and not s3_url:
-        module.fail_json(msg='ceph flavour requires s3_url')
+    if not endpoint_url and 'S3_URL' in os.environ:
+        endpoint_url = os.environ['S3_URL']

-    # Look at s3_url and tweak connection settings
     # if connecting to Ceph RGW, Walrus or fakes3
-    if s3_url:
+    if endpoint_url:
         for key in ['validate_certs', 'security_token', 'profile_name']:
             aws_connect_kwargs.pop(key, None)
-    s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)
+    s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url)

     if s3_client is None:  # this should never happen
         module.fail_json(msg='Unknown error, failed to create s3 connection, no information available.')
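After this hunk the endpoint resolves in a fixed order: an explicit ``endpoint_url`` (or its ``s3_url`` alias) wins, then the legacy ``S3_URL`` environment variable, and otherwise the value stays None and boto3 uses the AWS default. A runnable sketch of that fallback (hostnames are placeholders):

    import os

    def effective_endpoint(endpoint_url):
        # Explicit parameter beats the eucarc-style environment variable.
        if not endpoint_url and 'S3_URL' in os.environ:
            return os.environ['S3_URL']
        return endpoint_url  # may be None -> boto3 falls back to AWS

    os.environ['S3_URL'] = 'http://walrus.example.com:8773'
    assert effective_endpoint(None) == 'http://walrus.example.com:8773'
    assert effective_endpoint('http://rgw.example.com') == 'http://rgw.example.com'
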
87 changes: 47 additions & 40 deletions plugins/modules/s3_object.py
@@ -18,6 +18,8 @@
   - Support for creating or deleting S3 buckets with this module has been deprecated and will be
     removed in release 6.0.0.
   - S3 buckets can be created or deleted using the M(amazon.aws.s3_bucket) module.
+  - Compatible with AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID.
+  - When using non-AWS services, I(endpoint_url) should be specified.
 options:
   bucket:
     description:
@@ -141,18 +143,18 @@
     default: 0
     type: int
     aliases: ['retry']
-  s3_url:
-    description:
-      - S3 URL endpoint for usage with Ceph, Eucalyptus and fakes3 etc. Otherwise assumes AWS.
-    type: str
   dualstack:
     description:
       - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
     type: bool
     default: false
-  rgw:
+  ceph:
     description:
-      - Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url).
+      - Enable API compatibility with Ceph RGW.
+      - It takes into account the S3 API subset working with Ceph in order to provide the same module
+        behaviour where possible.
+      - Requires I(endpoint_url) if I(ceph=true).
+    aliases: ['rgw']
     default: false
     type: bool
   src:
@@ -228,6 +230,9 @@
   - "Alina Buzachis (@alinabuzachis)"
 notes:
   - Support for I(tags) and I(purge_tags) was added in release 2.0.0.
+  - In release 5.0.0 the I(s3_url) parameter was merged into the I(endpoint_url) parameter,
+    I(s3_url) remains as an alias for I(endpoint_url).
+  - For Walrus I(endpoint_url) should be set to the FQDN of the endpoint with neither scheme nor path.
 extends_documentation_fragment:
 - amazon.aws.aws
 - amazon.aws.ec2
@@ -255,8 +260,8 @@
     object: /my/desired/key.txt
     src: /usr/local/myfile.txt
     mode: put
-    rgw: true
-    s3_url: "http://localhost:8000"
+    ceph: true
+    endpoint_url: "http://localhost:8000"

 - name: Simple GET operation
   amazon.aws.s3_object:
@@ -822,20 +827,21 @@ def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate,
         module.fail_json_aws(e, msg="Failed while copying object %s from bucket %s." % (obj, module.params['copy_src'].get('Bucket')))


-def is_fakes3(s3_url):
-    """ Return True if s3_url has scheme fakes3:// """
-    if s3_url is not None:
-        return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+def is_fakes3(endpoint_url):
+    """ Return True if endpoint_url has scheme fakes3:// """
+    if endpoint_url is not None:
+        return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s')
     else:
         return False


-def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False):
-    if s3_url and rgw:  # TODO - test this
-        rgw = urlparse(s3_url)
-        params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
-    elif is_fakes3(s3_url):
-        fakes3 = urlparse(s3_url)
+def get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=False):
+    if ceph:  # TODO - test this
+        ceph = urlparse(endpoint_url)
+        params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https',
+                      region=location, endpoint=endpoint_url, **aws_connect_kwargs)
+    elif is_fakes3(endpoint_url):
+        fakes3 = urlparse(endpoint_url)
         port = fakes3.port
         if fakes3.scheme == 'fakes3s':
             protocol = "https"
@@ -849,7 +855,7 @@ def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False):
                       endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
                       use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
     else:
-        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
+        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs)
     if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
         params['config'] = botocore.client.Config(signature_version='s3v4')
     elif module.params['mode'] in ('get', 'getstr') and sig_4:
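The ``config`` override above forces Signature Version 4, which SSE-KMS uploads require and to which some downloads fall back. A roughly equivalent plain-boto3 setup, shown for context (the endpoint is a placeholder, not a value from this PR):

    import boto3
    from botocore.client import Config

    # SigV4-signed client against a custom endpoint; aws:kms server-side
    # encryption rejects older signature versions.
    s3 = boto3.client(
        's3',
        endpoint_url='https://rgw.example.com',
        config=Config(signature_version='s3v4'),
    )
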
@@ -959,9 +965,8 @@ def main():
         overwrite=dict(aliases=['force'], default='different'),
         prefix=dict(default=""),
         retries=dict(aliases=['retry'], type='int', default=0),
-        s3_url=dict(),
         dualstack=dict(default='no', type='bool'),
-        rgw=dict(default='no', type='bool'),
+        ceph=dict(default=False, type='bool', aliases=['rgw']),
         src=dict(type='path'),
         content=dict(),
         content_base64=dict(),
@@ -972,14 +977,20 @@
         copy_src=dict(type='dict', options=dict(bucket=dict(required=True), object=dict(required=True), version_id=dict())),
         validate_bucket_name=dict(type='bool', default=True),
     )
+
+    required_if = [
+        ['ceph', True, ['endpoint_url']],
+        ['mode', 'put', ['object']],
+        ['mode', 'get', ['dest', 'object']],
+        ['mode', 'getstr', ['object']],
+        ['mode', 'geturl', ['object']],
+        ['mode', 'copy', ['copy_src']],
+    ]
+
     module = AnsibleAWSModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
-        required_if=[['mode', 'put', ['object']],
-                     ['mode', 'get', ['dest', 'object']],
-                     ['mode', 'getstr', ['object']],
-                     ['mode', 'geturl', ['object']],
-                     ['mode', 'copy', ['copy_src']]],
+        required_if=required_if,
         mutually_exclusive=[['content', 'content_base64', 'src']],
     )

@@ -997,9 +1008,9 @@
     overwrite = module.params.get('overwrite')
     prefix = module.params.get('prefix')
     retries = module.params.get('retries')
-    s3_url = module.params.get('s3_url')
+    endpoint_url = module.params.get('endpoint_url')
     dualstack = module.params.get('dualstack')
-    rgw = module.params.get('rgw')
+    ceph = module.params.get('ceph')
     src = module.params.get('src')
     content = module.params.get('content')
     content_base64 = module.params.get('content_base64')
@@ -1042,22 +1053,18 @@ def main():
         module.fail_json(msg='Parameter obj cannot be used with mode=delete')

     # allow eucarc environment variables to be used if ansible vars aren't set
-    if not s3_url and 'S3_URL' in os.environ:
-        s3_url = os.environ['S3_URL']
+    if not endpoint_url and 'S3_URL' in os.environ:
+        endpoint_url = os.environ['S3_URL']

-    if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
+    if dualstack and endpoint_url is not None and 'amazonaws.com' not in endpoint_url:
         module.fail_json(msg='dualstack only applies to AWS S3')

-    # rgw requires an explicit url
-    if rgw and not s3_url:
-        module.fail_json(msg='rgw flavour requires s3_url')
-
-    # Look at s3_url and tweak connection settings
+    # Look at endpoint_url and tweak connection settings
     # if connecting to RGW, Walrus or fakes3
-    if s3_url:
+    if endpoint_url:
         for key in ['validate_certs', 'security_token', 'profile_name']:
             aws_connect_kwargs.pop(key, None)
-    s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)
+    s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url)

     validate = not ignore_nonexistent_bucket

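The dualstack guard above is behaviourally unchanged by the rename: dual-stack endpoints exist only on AWS proper, so any custom endpoint that does not contain ``amazonaws.com`` is rejected. The check in isolation (same logic, runnable standalone):

    def dualstack_allowed(dualstack, endpoint_url):
        # Reject dualstack for any endpoint that is clearly not AWS.
        if dualstack and endpoint_url is not None and 'amazonaws.com' not in endpoint_url:
            return False
        return True

    assert dualstack_allowed(True, None)  # default AWS endpoint
    assert dualstack_allowed(True, 'https://s3.dualstack.us-east-1.amazonaws.com')
    assert not dualstack_allowed(True, 'https://rgw.example.com')  # non-AWS endpoint
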
@@ -1106,7 +1113,7 @@
         try:
             download_s3file(module, s3, bucket, obj, dest, retries, version=version)
         except Sigv4Required:
-            s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
+            s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=True)
             download_s3file(module, s3, bucket, obj, dest, retries, version=version)

     if mode == 'put':
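Both download paths keep the same retry shape after the rename: attempt with the default client, and on ``Sigv4Required`` rebuild the connection with ``sig_4=True`` and retry once. The pattern in isolation (``Sigv4Required`` is the module's own exception; the callables here are stand-ins):

    class Sigv4Required(Exception):
        pass

    def download_with_sigv4_fallback(get_conn, download):
        # First attempt with the default signature; on Sigv4Required,
        # rebuild the client with SigV4 signing and retry once.
        s3 = get_conn(sig_4=False)
        try:
            return download(s3)
        except Sigv4Required:
            s3 = get_conn(sig_4=True)
            return download(s3)
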
@@ -1228,7 +1235,7 @@
             try:
                 download_s3str(module, s3, bucket, obj, version=version)
             except Sigv4Required:
-                s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
+                s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=True)
                 download_s3str(module, s3, bucket, obj, version=version)
         elif version is not None:
             module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))