|
| 1 | +import json |
| 2 | +import logging |
| 3 | +import re |
| 4 | +import uuid |
| 5 | + |
| 6 | +import boto3 |
| 7 | +import openstack |
| 8 | + |
| 9 | + |
| 10 | +TESTCONTNAME = "scs-test-container" |
| 11 | +EC2MARKER = "TmpMandSvcTest" |
| 12 | + |
| 13 | +logger = logging.getLogger(__name__) |
| 14 | +# NOTE suppress excessive logging (who knows what sensitive data might be in there) |
| 15 | +# For the time being, I don't know where else to do it, but I guess it's fine. |
| 16 | +logging.getLogger('botocore').setLevel(logging.WARNING) |
| 17 | +logging.getLogger('boto3.resources').setLevel(logging.WARNING) |
| 18 | + |
| 19 | + |
def compute_scs_0123_service_presence(services_lookup, *names):
    """Check that at least one service of any of the given types is present.

    :param services_lookup: mapping of service-type name to a sequence of services
    :param names: service-type names to look for (treated as alternatives)
    :returns: True iff any of the given types has at least one service
    """
    services = []
    for name in names:
        services.extend(services_lookup.get(name, ()))
    if not services:
        # lazy %-style args: the message is only formatted if the record is emitted
        logger.error("No service of type(s) %s found.", ', '.join(names))
    return bool(services)
| 27 | + |
| 28 | + |
def s3_conn(creds, conn):
    """Build a boto3 S3 resource from the given credential dict.

    ``creds`` must provide ``HOST``, ``AK`` and ``SK``. TLS verification is
    disabled whenever the OpenStack connection carries a custom CA cert.
    """
    cacert = conn.config.config.get("cacert")
    # TODO: Handle self-signed certs (from ca_cert in openstack config)
    verify_tls = not cacert
    if cacert:
        logger.warning(f"Trust all Certificates in S3, OpenStack uses {cacert}")
    return boto3.resource(
        's3',
        endpoint_url=creds["HOST"],
        verify=verify_tls,
        aws_access_key_id=creds["AK"],
        aws_secret_access_key=creds["SK"],
    )
| 39 | + |
| 40 | + |
| 41 | +def _parse_blob(cred): |
| 42 | + try: |
| 43 | + return json.loads(cred.blob) |
| 44 | + except Exception as exc: |
| 45 | + logger.debug(f"unable to parse credential {cred!r}: {exc!r}") |
| 46 | + return None |
| 47 | + |
| 48 | + |
def get_usable_credentials(conn):
    """Collect all ec2 credentials of the current project that carry real data.

    A credential is usable when its JSON blob parses and contains non-empty
    ``access`` and ``secret`` fields.

    :returns: list of pairs ``(credential, parsed ec2 data)``
    """
    project_id = conn.identity.get_project_id()
    usable = []
    for cred in conn.identity.credentials():
        if cred.type != "ec2" or cred.project_id != project_id:
            continue
        parsed = _parse_blob(cred)
        if parsed and parsed.get('access') and parsed.get('secret'):
            usable.append((cred, parsed))
    return usable
| 66 | + |
| 67 | + |
def remove_leftovers(usable_credentials, conn):
    """Delete any ec2 credentials that an earlier test run left behind.

    Credentials whose parsed blob carries our owner marker are deleted via
    keystone; all other entries are kept.

    :returns: the surviving ``(credential, parsed)`` pairs
    """
    kept = []
    for cred, parsed in usable_credentials:
        if parsed.get("owner") != EC2MARKER:
            kept.append((cred, parsed))
            continue
        logger.debug(f"Removing leftover credential {parsed['access']}")
        conn.identity.delete_credential(cred)
    return kept
| 81 | + |
| 82 | + |
def ensure_ec2_credentials(usable_credentials, conn):
    """Guarantee that at least one usable set of ec2 credentials exists.

    A non-empty list is returned untouched. Otherwise a fresh credential —
    tagged with our owner marker so later runs can clean it up — is created
    in keystone and appended to the (mutated) list.
    """
    if usable_credentials:
        return usable_credentials
    parsed = dict(
        access=uuid.uuid4().hex,
        secret=uuid.uuid4().hex,
        owner=EC2MARKER,
    )
    try:
        crd = conn.identity.create_credential(
            type="ec2",
            blob=json.dumps(parsed),
            user_id=conn.current_user_id,
            project_id=conn.current_project_id,
        )
    except BaseException:
        logger.warning("ec2 creds creation failed", exc_info=True)
        raise
    usable_credentials.append((crd, parsed))
    return usable_credentials  # also return for chaining
| 101 | + |
| 102 | + |
def s3_from_ostack(usable_credentials, conn, rgx=re.compile(r"^(https*://[^/]*)/")):
    """Derive S3 credentials from the openstack ec2 creds and swift endpoint.

    The access/secret pair comes from the first usable credential; ``HOST``
    is the scheme+authority of the object-store endpoint (omitted when the
    endpoint does not match the expected URL shape).
    """
    parsed = usable_credentials[0][1]
    s3_creds = {
        "AK": parsed["access"],
        "SK": parsed["secret"],
    }
    match = rgx.match(conn.object_store.get_endpoint())
    if match is not None:
        s3_creds["HOST"] = match.group(1)
    return s3_creds
| 115 | + |
| 116 | + |
def compute_scs_0123_swift_s3(conn: openstack.connection.Connection):
    """Check that the S3 bucket list and the Swift container list agree.

    Obtains (or creates) ec2 credentials via keystone, connects to the S3
    endpoint derived from the Swift catalog entry, makes sure at least one
    bucket exists, then compares the sorted S3 bucket names with the sorted
    Swift container names. Any bucket/credential we created is cleaned up.

    :returns: True on success, False when the listings differ
    :raises RuntimeError: if the test bucket could not be created
    """
    # we assume s3 is accessible via the service catalog, and Swift might exist too
    usable_credentials = []
    s3_buckets = []
    # Get S3 endpoint (swift) and ec2 creds from OpenStack (keystone)
    try:
        usable_credentials = remove_leftovers(get_usable_credentials(conn), conn)
        s3_creds = s3_from_ostack(ensure_ec2_credentials(usable_credentials, conn), conn)

        # This is to be used for local debugging purposes ONLY
        # logger.debug(f"using credentials {s3_creds}")

        s3 = s3_conn(s3_creds, conn)
        buckets = list(s3.buckets.all())
        if not buckets:
            s3.create_bucket(Bucket=TESTCONTNAME)
            buckets = list(s3.buckets.all())
            if not buckets:
                raise RuntimeError("failed to create S3 bucket")

        # actual test: buckets must equal containers (sort in case the order is different)
        s3_buckets = sorted(b.name for b in buckets)
        sw_containers = sorted(c.name for c in conn.object_store.containers())
        if s3_buckets == sw_containers:
            return True
        logger.error(
            "S3 buckets and Swift containers differ:\n"
            f"S3: {s3_buckets}\n"
            f"SW: {sw_containers}"
        )
        return False
    finally:
        # Cleanup created S3 bucket. NB: the boto3 S3 ServiceResource has no
        # delete_bucket() action (the old code would raise AttributeError
        # here) — go through the Bucket sub-resource instead.
        if TESTCONTNAME in s3_buckets:
            s3.Bucket(TESTCONTNAME).delete()
        # Clean up ec2 cred IF we created one
        remove_leftovers(usable_credentials, conn)
0 commit comments